Merge pull request #1351 from TheBlueMatt/2022-03-scid-privacy
author Matt Corallo <649246+TheBlueMatt@users.noreply.github.com>
Mon, 28 Mar 2022 20:33:55 +0000 (20:33 +0000)
committer GitHub <noreply@github.com>
Mon, 28 Mar 2022 20:33:55 +0000 (20:33 +0000)
Implement the SCIDAlias Channel Type and provide SCID Privacy

27 files changed:
.github/workflows/build.yml
fuzz/src/chanmon_consistency.rs
fuzz/src/full_stack.rs
lightning-background-processor/src/lib.rs
lightning-invoice/src/utils.rs
lightning-net-tokio/src/lib.rs
lightning/Cargo.toml
lightning/src/chain/keysinterface.rs
lightning/src/debug_sync.rs
lightning/src/ln/chan_utils.rs
lightning/src/ln/chanmon_update_fail_tests.rs
lightning/src/ln/channel.rs
lightning/src/ln/channelmanager.rs
lightning/src/ln/functional_test_utils.rs
lightning/src/ln/functional_tests.rs
lightning/src/ln/msgs.rs
lightning/src/ln/payment_tests.rs
lightning/src/ln/peer_handler.rs
lightning/src/ln/priv_short_conf_tests.rs
lightning/src/ln/shutdown_tests.rs
lightning/src/ln/wire.rs
lightning/src/routing/network_graph.rs
lightning/src/routing/router.rs
lightning/src/routing/scoring.rs
lightning/src/util/crypto.rs
lightning/src/util/events.rs
lightning/src/util/test_utils.rs

index 9b6abc71151650bcd3a0964cdf5284c3fd0a1503..90e08b398bccd3dd4ee6b8593bc74d1492f40030 100644 (file)
@@ -115,6 +115,9 @@ jobs:
           cargo test --verbose --color always --no-default-features --features no-std
           # check if there is a conflict between no-std and the default std feature
           cargo test --verbose --color always --features no-std
+          # check that things still pass without grind_signatures
+          # note that outbound_commitment_test only runs in this mode, because of hardcoded signature values
+          cargo test --verbose --color always --no-default-features --features std
           # check if there is a conflict between no-std and the c_bindings cfg
           RUSTFLAGS="--cfg=c_bindings" cargo test --verbose --color always --no-default-features --features=no-std
           cd ..
@@ -211,7 +214,7 @@ jobs:
           profile: minimal
       - name: Cache routing graph snapshot
         id: cache-graph
-        uses: actions/cache@v2
+        uses: actions/cache@v3
         with:
           path: lightning/net_graph-2021-05-31.bin
           key: ldk-net_graph-v0.0.15-2021-05-31.bin
index 79faba901581190742232aafe994fd969a5e0332..8c4f5adcb64ae64abbbc03b0956c556f3abbf44c 100644 (file)
@@ -411,8 +411,8 @@ pub fn do_test<Out: Output>(data: &[u8], underlying_out: Out) {
        let mut channel_txn = Vec::new();
        macro_rules! make_channel {
                ($source: expr, $dest: expr, $chan_id: expr) => { {
-                       $source.peer_connected(&$dest.get_our_node_id(), &Init { features: InitFeatures::known() });
-                       $dest.peer_connected(&$source.get_our_node_id(), &Init { features: InitFeatures::known() });
+                       $source.peer_connected(&$dest.get_our_node_id(), &Init { features: InitFeatures::known(), remote_network_address: None });
+                       $dest.peer_connected(&$source.get_our_node_id(), &Init { features: InitFeatures::known(), remote_network_address: None });
 
                        $source.create_channel($dest.get_our_node_id(), 100_000, 42, 0, None).unwrap();
                        let open_channel = {
@@ -921,15 +921,15 @@ pub fn do_test<Out: Output>(data: &[u8], underlying_out: Out) {
                        },
                        0x0e => {
                                if chan_a_disconnected {
-                                       nodes[0].peer_connected(&nodes[1].get_our_node_id(), &Init { features: InitFeatures::known() });
-                                       nodes[1].peer_connected(&nodes[0].get_our_node_id(), &Init { features: InitFeatures::known() });
+                                       nodes[0].peer_connected(&nodes[1].get_our_node_id(), &Init { features: InitFeatures::known(), remote_network_address: None });
+                                       nodes[1].peer_connected(&nodes[0].get_our_node_id(), &Init { features: InitFeatures::known(), remote_network_address: None });
                                        chan_a_disconnected = false;
                                }
                        },
                        0x0f => {
                                if chan_b_disconnected {
-                                       nodes[1].peer_connected(&nodes[2].get_our_node_id(), &Init { features: InitFeatures::known() });
-                                       nodes[2].peer_connected(&nodes[1].get_our_node_id(), &Init { features: InitFeatures::known() });
+                                       nodes[1].peer_connected(&nodes[2].get_our_node_id(), &Init { features: InitFeatures::known(), remote_network_address: None });
+                                       nodes[2].peer_connected(&nodes[1].get_our_node_id(), &Init { features: InitFeatures::known(), remote_network_address: None });
                                        chan_b_disconnected = false;
                                }
                        },
@@ -1124,13 +1124,13 @@ pub fn do_test<Out: Output>(data: &[u8], underlying_out: Out) {
 
                                // Next, make sure peers are all connected to each other
                                if chan_a_disconnected {
-                                       nodes[0].peer_connected(&nodes[1].get_our_node_id(), &Init { features: InitFeatures::known() });
-                                       nodes[1].peer_connected(&nodes[0].get_our_node_id(), &Init { features: InitFeatures::known() });
+                                       nodes[0].peer_connected(&nodes[1].get_our_node_id(), &Init { features: InitFeatures::known(), remote_network_address: None });
+                                       nodes[1].peer_connected(&nodes[0].get_our_node_id(), &Init { features: InitFeatures::known(), remote_network_address: None });
                                        chan_a_disconnected = false;
                                }
                                if chan_b_disconnected {
-                                       nodes[1].peer_connected(&nodes[2].get_our_node_id(), &Init { features: InitFeatures::known() });
-                                       nodes[2].peer_connected(&nodes[1].get_our_node_id(), &Init { features: InitFeatures::known() });
+                                       nodes[1].peer_connected(&nodes[2].get_our_node_id(), &Init { features: InitFeatures::known(), remote_network_address: None });
+                                       nodes[2].peer_connected(&nodes[1].get_our_node_id(), &Init { features: InitFeatures::known(), remote_network_address: None });
                                        chan_b_disconnected = false;
                                }
 
index a4e1a08648f3aac50413659f0a2bcea7985ca006..0412547a0089a150db7f8038d7c4454a8bf05b02 100644 (file)
@@ -422,7 +422,7 @@ pub fn do_test(data: &[u8], logger: &Arc<dyn Logger>) {
                                        }
                                }
                                if new_id == 0 { return; }
-                               loss_detector.handler.new_outbound_connection(get_pubkey!(), Peer{id: (new_id - 1) as u8, peers_connected: &peers}).unwrap();
+                               loss_detector.handler.new_outbound_connection(get_pubkey!(), Peer{id: (new_id - 1) as u8, peers_connected: &peers}, None).unwrap();
                                peers.borrow_mut()[new_id - 1] = true;
                        },
                        1 => {
@@ -434,7 +434,7 @@ pub fn do_test(data: &[u8], logger: &Arc<dyn Logger>) {
                                        }
                                }
                                if new_id == 0 { return; }
-                               loss_detector.handler.new_inbound_connection(Peer{id: (new_id - 1) as u8, peers_connected: &peers}).unwrap();
+                               loss_detector.handler.new_inbound_connection(Peer{id: (new_id - 1) as u8, peers_connected: &peers}, None).unwrap();
                                peers.borrow_mut()[new_id - 1] = true;
                        },
                        2 => {
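
The fuzz-target changes above follow an API change in this PR: `Init` gained a `remote_network_address` field, and `PeerManager::new_outbound_connection`/`new_inbound_connection` now take an extra `Option` carrying the peer's socket address (the fuzzers simply pass `None`). A minimal sketch of a caller that does know the address; the `NetAddress` variant and surrounding variable names are assumptions, not part of this diff:

    let init_msg = Init {
        features: InitFeatures::known(),
        // Pass the peer's public address along when it is known; `None` otherwise.
        remote_network_address: Some(NetAddress::IPv4 { addr: [203, 0, 113, 7], port: 9735 }),
    };
    node.peer_connected(&their_node_id, &init_msg);
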
index 084ea62efb6faf51f0ff67b0b5fb01fc61f965ef..22fef2661fa661d82884c047bb09697959841a15 100644 (file)
@@ -435,8 +435,8 @@ mod tests {
 
                for i in 0..num_nodes {
                        for j in (i+1)..num_nodes {
-                               nodes[i].node.peer_connected(&nodes[j].node.get_our_node_id(), &Init { features: InitFeatures::known() });
-                               nodes[j].node.peer_connected(&nodes[i].node.get_our_node_id(), &Init { features: InitFeatures::known() });
+                               nodes[i].node.peer_connected(&nodes[j].node.get_our_node_id(), &Init { features: InitFeatures::known(), remote_network_address: None });
+                               nodes[j].node.peer_connected(&nodes[i].node.get_our_node_id(), &Init { features: InitFeatures::known(), remote_network_address: None });
                        }
                }
 
index 630411f03af6243115cc69357eeecc264e9bb018..62820cfe11007027ecc2ec7802a5a4946767e1c6 100644 (file)
@@ -3,9 +3,9 @@
 use {CreationError, Currency, DEFAULT_EXPIRY_TIME, Invoice, InvoiceBuilder, SignOrCreationError};
 use payment::{Payer, Router};
 
+use crate::{prelude::*, Description, InvoiceDescription, Sha256};
 use bech32::ToBase32;
 use bitcoin_hashes::{Hash, sha256};
-use crate::prelude::*;
 use lightning::chain;
 use lightning::chain::chaininterface::{BroadcasterInterface, FeeEstimator};
 use lightning::chain::keysinterface::{Recipient, KeysInterface, Sign};
@@ -50,14 +50,77 @@ use sync::Mutex;
 /// [`ChannelManager::get_phantom_route_hints`]: lightning::ln::channelmanager::ChannelManager::get_phantom_route_hints
 /// [`PhantomRouteHints::channels`]: lightning::ln::channelmanager::PhantomRouteHints::channels
 pub fn create_phantom_invoice<Signer: Sign, K: Deref>(
-       amt_msat: Option<u64>, description: String, payment_hash: PaymentHash, payment_secret:
-       PaymentSecret, phantom_route_hints: Vec<PhantomRouteHints>, keys_manager: K, network: Currency
+       amt_msat: Option<u64>, description: String, payment_hash: PaymentHash, payment_secret: PaymentSecret,
+       phantom_route_hints: Vec<PhantomRouteHints>, keys_manager: K, network: Currency,
 ) -> Result<Invoice, SignOrCreationError<()>> where K::Target: KeysInterface {
+       let description = Description::new(description).map_err(SignOrCreationError::CreationError)?;
+       let description = InvoiceDescription::Direct(&description);
+       _create_phantom_invoice::<Signer, K>(
+               amt_msat, description, payment_hash, payment_secret, phantom_route_hints, keys_manager, network,
+       )
+}
+
+#[cfg(feature = "std")]
+/// Utility to create an invoice that can be paid to one of multiple nodes, or a "phantom invoice."
+/// See [`PhantomKeysManager`] for more information on phantom node payments.
+///
+/// `phantom_route_hints` parameter:
+/// * Contains channel info for all nodes participating in the phantom invoice
+/// * Entries are retrieved from a call to [`ChannelManager::get_phantom_route_hints`] on each
+///   participating node
+/// * It is fine to cache `phantom_route_hints` and reuse it across invoices, as long as the data is
+///   updated when a channel becomes disabled or closes
+/// * Note that if too many channels are included in [`PhantomRouteHints::channels`], the invoice
+///   may be too long for QR code scanning. To fix this, `PhantomRouteHints::channels` may be pared
+///   down
+///
+/// `description_hash` is a SHA-256 hash of the description text
+///
+/// `payment_hash` and `payment_secret` come from [`ChannelManager::create_inbound_payment`] or
+/// [`ChannelManager::create_inbound_payment_for_hash`]. These values can be retrieved from any
+/// participating node.
+///
+/// Note that the provided `keys_manager`'s `KeysInterface` implementation must support phantom
+/// invoices in its `sign_invoice` implementation ([`PhantomKeysManager`] satisfies this
+/// requirement).
+///
+/// [`PhantomKeysManager`]: lightning::chain::keysinterface::PhantomKeysManager
+/// [`ChannelManager::get_phantom_route_hints`]: lightning::ln::channelmanager::ChannelManager::get_phantom_route_hints
+/// [`PhantomRouteHints::channels`]: lightning::ln::channelmanager::PhantomRouteHints::channels
+pub fn create_phantom_invoice_with_description_hash<Signer: Sign, K: Deref>(
+       amt_msat: Option<u64>, description_hash: Sha256, payment_hash: PaymentHash,
+       payment_secret: PaymentSecret, phantom_route_hints: Vec<PhantomRouteHints>,
+       keys_manager: K, network: Currency,
+) -> Result<Invoice, SignOrCreationError<()>> where K::Target: KeysInterface
+{
+       _create_phantom_invoice::<Signer, K>(
+               amt_msat,
+               InvoiceDescription::Hash(&description_hash),
+               payment_hash, payment_secret, phantom_route_hints, keys_manager, network,
+       )
+}
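
A rough call-site sketch for the new description-hash variant, mirroring the `create_phantom_invoice_with_description_hash` test added further down in this file; `channel_manager_1`, `channel_manager_2`, `keys_manager` and `MySigner` are placeholder names, not part of this diff:

    let amt_msat = Some(20_000);
    let (payment_hash, payment_secret) =
        channel_manager_1.create_inbound_payment(amt_msat, 3600).unwrap();
    // One `PhantomRouteHints` entry per node participating in the phantom invoice.
    let phantom_route_hints = vec![
        channel_manager_1.get_phantom_route_hints(),
        channel_manager_2.get_phantom_route_hints(),
    ];
    let description_hash = Sha256(Hash::hash("Some description".as_bytes()));
    let invoice = create_phantom_invoice_with_description_hash::<MySigner, _>(
        amt_msat, description_hash, payment_hash, payment_secret,
        phantom_route_hints, &keys_manager, Currency::BitcoinTestnet,
    ).unwrap();
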
+
+#[cfg(feature = "std")]
+fn _create_phantom_invoice<Signer: Sign, K: Deref>(
+       amt_msat: Option<u64>, description: InvoiceDescription, payment_hash: PaymentHash,
+       payment_secret: PaymentSecret, phantom_route_hints: Vec<PhantomRouteHints>,
+       keys_manager: K, network: Currency,
+) -> Result<Invoice, SignOrCreationError<()>> where K::Target: KeysInterface
+{
        if phantom_route_hints.len() == 0 {
-               return Err(SignOrCreationError::CreationError(CreationError::MissingRouteHints))
+               return Err(SignOrCreationError::CreationError(
+                       CreationError::MissingRouteHints,
+               ));
        }
-       let mut invoice = InvoiceBuilder::new(network)
-               .description(description)
+       let invoice = match description {
+               InvoiceDescription::Direct(description) => {
+                       InvoiceBuilder::new(network).description(description.0.clone())
+               }
+               InvoiceDescription::Hash(hash) => InvoiceBuilder::new(network).description_hash(hash.0),
+       };
+
+       let mut invoice = invoice
                .current_timestamp()
                .payment_hash(Hash::from_slice(&payment_hash.0).unwrap())
                .payment_secret(payment_secret)
@@ -66,40 +129,28 @@ pub fn create_phantom_invoice<Signer: Sign, K: Deref>(
                invoice = invoice.amount_milli_satoshis(amt);
        }
 
-       for hint in phantom_route_hints {
-               for channel in &hint.channels {
-                       let short_channel_id = match channel.get_inbound_payment_scid() {
-                               Some(id) => id,
-                               None => continue,
-                       };
-                       let forwarding_info = match &channel.counterparty.forwarding_info {
-                               Some(info) => info.clone(),
-                               None => continue,
-                       };
-                       invoice = invoice.private_route(RouteHint(vec![
-                                       RouteHintHop {
-                                               src_node_id: channel.counterparty.node_id,
-                                               short_channel_id,
-                                               fees: RoutingFees {
-                                                       base_msat: forwarding_info.fee_base_msat,
-                                                       proportional_millionths: forwarding_info.fee_proportional_millionths,
-                                               },
-                                               cltv_expiry_delta: forwarding_info.cltv_expiry_delta,
-                                               htlc_minimum_msat: None,
-                                               htlc_maximum_msat: None,
-                                       },
-                                       RouteHintHop {
-                                               src_node_id: hint.real_node_pubkey,
-                                               short_channel_id: hint.phantom_scid,
-                                               fees: RoutingFees {
-                                                       base_msat: 0,
-                                                       proportional_millionths: 0,
-                                               },
-                                               cltv_expiry_delta: MIN_CLTV_EXPIRY_DELTA,
-                                               htlc_minimum_msat: None,
-                                               htlc_maximum_msat: None,
-                                       }])
-                       );
+       for PhantomRouteHints { channels, phantom_scid, real_node_pubkey } in phantom_route_hints {
+               let mut route_hints = filter_channels(channels, amt_msat);
+
+               // If we have any public channel, the route hints from `filter_channels` will be empty.
+               // In that case we create an empty RouteHint and push a single hop with the phantom
+               // route onto it, letting the sender find the path to the `real_node_pubkey` node by
+               // looking at our public channels.
+               if route_hints.is_empty() {
+                       route_hints.push(RouteHint(vec![]))
+               }
+               for mut route_hint in route_hints {
+                       route_hint.0.push(RouteHintHop {
+                               src_node_id: real_node_pubkey,
+                               short_channel_id: phantom_scid,
+                               fees: RoutingFees {
+                                       base_msat: 0,
+                                       proportional_millionths: 0,
+                               },
+                               cltv_expiry_delta: MIN_CLTV_EXPIRY_DELTA,
+                               htlc_minimum_msat: None,
+                               htlc_maximum_msat: None,
+                       });
+                       invoice = invoice.private_route(route_hint.clone());
                }
        }
 
@@ -138,12 +189,57 @@ where
        let duration = SystemTime::now().duration_since(SystemTime::UNIX_EPOCH)
                .expect("for the foreseeable future this shouldn't happen");
        create_invoice_from_channelmanager_and_duration_since_epoch(
-               channelmanager,
-               keys_manager,
-               network,
-               amt_msat,
-               description,
-               duration
+               channelmanager, keys_manager, network, amt_msat, description, duration
+       )
+}
+
+#[cfg(feature = "std")]
+/// Utility to construct an invoice. Generally, unless you want to do something like a custom
+/// cltv_expiry, this is what you should be using to create an invoice. This is because this
+/// method stores the invoice's payment secret and preimage in `ChannelManager`, so (a) the user
+/// doesn't have to store preimage/payment secret information and (b) `ChannelManager` can verify
+/// that the payment secret is valid when the invoice is paid.
+///
+/// Use this variant if you want to pass a `description_hash` to the invoice.
+pub fn create_invoice_from_channelmanager_with_description_hash<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>(
+       channelmanager: &ChannelManager<Signer, M, T, K, F, L>, keys_manager: K, network: Currency,
+       amt_msat: Option<u64>, description_hash: Sha256,
+) -> Result<Invoice, SignOrCreationError<()>>
+where
+       M::Target: chain::Watch<Signer>,
+       T::Target: BroadcasterInterface,
+       K::Target: KeysInterface<Signer = Signer>,
+       F::Target: FeeEstimator,
+       L::Target: Logger,
+{
+       use std::time::SystemTime;
+
+       let duration = SystemTime::now()
+               .duration_since(SystemTime::UNIX_EPOCH)
+               .expect("for the foreseeable future this shouldn't happen");
+
+       create_invoice_from_channelmanager_with_description_hash_and_duration_since_epoch(
+               channelmanager, keys_manager, network, amt_msat, description_hash, duration,
+       )
+}
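
A minimal sketch of the new `std` entry point, assuming `channel_manager` and `keys_manager` already exist at the call site (names are illustrative):

    let description_hash = Sha256(Hash::hash("coffee #1234".as_bytes()));
    let invoice = create_invoice_from_channelmanager_with_description_hash(
        &channel_manager, keys_manager, Currency::Bitcoin, Some(50_000), description_hash,
    ).expect("invoice creation failed");
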
+
+/// See [`create_invoice_from_channelmanager_with_description_hash`].
+/// This version can be used in a `no_std` environment, where [`std::time::SystemTime`] is not
+/// available and the current time is supplied by the caller.
+pub fn create_invoice_from_channelmanager_with_description_hash_and_duration_since_epoch<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>(
+       channelmanager: &ChannelManager<Signer, M, T, K, F, L>, keys_manager: K, network: Currency,
+       amt_msat: Option<u64>, description_hash: Sha256, duration_since_epoch: Duration,
+) -> Result<Invoice, SignOrCreationError<()>>
+where
+       M::Target: chain::Watch<Signer>,
+       T::Target: BroadcasterInterface,
+       K::Target: KeysInterface<Signer = Signer>,
+       F::Target: FeeEstimator,
+       L::Target: Logger,
+{
+       _create_invoice_from_channelmanager_and_duration_since_epoch(
+               channelmanager, keys_manager, network, amt_msat,
+               InvoiceDescription::Hash(&description_hash),
+               duration_since_epoch,
        )
 }
 
@@ -161,39 +257,43 @@ where
        F::Target: FeeEstimator,
        L::Target: Logger,
 {
-       // Marshall route hints.
-       let our_channels = channelmanager.list_usable_channels();
-       let mut route_hints = vec![];
-       for channel in our_channels {
-               let short_channel_id = match channel.get_inbound_payment_scid() {
-                       Some(id) => id,
-                       None => continue,
-               };
-               let forwarding_info = match channel.counterparty.forwarding_info {
-                       Some(info) => info,
-                       None => continue,
-               };
-               route_hints.push(RouteHint(vec![RouteHintHop {
-                       src_node_id: channel.counterparty.node_id,
-                       short_channel_id,
-                       fees: RoutingFees {
-                               base_msat: forwarding_info.fee_base_msat,
-                               proportional_millionths: forwarding_info.fee_proportional_millionths,
-                       },
-                       cltv_expiry_delta: forwarding_info.cltv_expiry_delta,
-                       htlc_minimum_msat: None,
-                       htlc_maximum_msat: None,
-               }]));
-       }
+       _create_invoice_from_channelmanager_and_duration_since_epoch(
+               channelmanager, keys_manager, network, amt_msat,
+               InvoiceDescription::Direct(
+                       &Description::new(description).map_err(SignOrCreationError::CreationError)?,
+               ),
+               duration_since_epoch,
+       )
+}
+
+fn _create_invoice_from_channelmanager_and_duration_since_epoch<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>(
+       channelmanager: &ChannelManager<Signer, M, T, K, F, L>, keys_manager: K, network: Currency,
+       amt_msat: Option<u64>, description: InvoiceDescription, duration_since_epoch: Duration,
+) -> Result<Invoice, SignOrCreationError<()>>
+where
+       M::Target: chain::Watch<Signer>,
+       T::Target: BroadcasterInterface,
+       K::Target: KeysInterface<Signer = Signer>,
+       F::Target: FeeEstimator,
+       L::Target: Logger,
+{
+       let route_hints = filter_channels(channelmanager.list_usable_channels(), amt_msat);
 
        // `create_inbound_payment` only returns an error if the amount is greater than the total bitcoin
        // supply.
-       let (payment_hash, payment_secret) = channelmanager.create_inbound_payment(
-               amt_msat, DEFAULT_EXPIRY_TIME.try_into().unwrap())
+       let (payment_hash, payment_secret) = channelmanager
+               .create_inbound_payment(amt_msat, DEFAULT_EXPIRY_TIME.try_into().unwrap())
                .map_err(|()| SignOrCreationError::CreationError(CreationError::InvalidAmount))?;
        let our_node_pubkey = channelmanager.get_our_node_id();
-       let mut invoice = InvoiceBuilder::new(network)
-               .description(description)
+
+       let invoice = match description {
+               InvoiceDescription::Direct(description) => {
+                       InvoiceBuilder::new(network).description(description.0.clone())
+               }
+               InvoiceDescription::Hash(hash) => InvoiceBuilder::new(network).description_hash(hash.0),
+       };
+
+       let mut invoice = invoice
                .duration_since_epoch(duration_since_epoch)
                .payee_pub_key(our_node_pubkey)
                .payment_hash(Hash::from_slice(&payment_hash.0).unwrap())
@@ -221,6 +321,74 @@ where
        }
 }
 
+/// Filters the `channels` for an invoice, and returns the corresponding `RouteHint`s to include
+/// in the invoice.
+///
+/// The filtering is based on the following criteria:
+/// * Only one channel per counterparty node
+/// * Always select the channel with the highest inbound capacity per counterparty node
+/// * Filter out channels with an inbound capacity below `min_inbound_capacity_msat`, if any
+///   channel with an inbound capacity of at least `min_inbound_capacity_msat` exists
+/// * If any public channel exists, the returned `RouteHint`s will be empty, and the sender will
+///   need to find the path by looking at the public channels instead
+fn filter_channels(channels: Vec<ChannelDetails>, min_inbound_capacity_msat: Option<u64>) -> Vec<RouteHint> {
+       let mut filtered_channels: HashMap<PublicKey, &ChannelDetails> = HashMap::new();
+       let min_inbound_capacity = min_inbound_capacity_msat.unwrap_or(0);
+       let mut min_capacity_channel_exists = false;
+
+       for channel in channels.iter() {
+               if channel.get_inbound_payment_scid().is_none() || channel.counterparty.forwarding_info.is_none() {
+                       continue;
+               }
+
+               if channel.is_public {
+                       // If any public channel exists, return no hints and let the sender
+                       // look at the public channels instead.
+                       return vec![]
+               }
+
+               if channel.inbound_capacity_msat >= min_inbound_capacity {
+                       min_capacity_channel_exists = true;
+               };
+               match filtered_channels.entry(channel.counterparty.node_id) {
+                       hash_map::Entry::Occupied(mut entry) => {
+                               let current_max_capacity = entry.get().inbound_capacity_msat;
+                               if channel.inbound_capacity_msat < current_max_capacity {
+                                       continue;
+                               }
+                               entry.insert(channel);
+                       }
+                       hash_map::Entry::Vacant(entry) => {
+                               entry.insert(channel);
+                       }
+               }
+       }
+
+       let route_hint_from_channel = |channel: &ChannelDetails| {
+               let forwarding_info = channel.counterparty.forwarding_info.as_ref().unwrap();
+               RouteHint(vec![RouteHintHop {
+                       src_node_id: channel.counterparty.node_id,
+                       short_channel_id: channel.get_inbound_payment_scid().unwrap(),
+                       fees: RoutingFees {
+                               base_msat: forwarding_info.fee_base_msat,
+                               proportional_millionths: forwarding_info.fee_proportional_millionths,
+                       },
+                       cltv_expiry_delta: forwarding_info.cltv_expiry_delta,
+                       htlc_minimum_msat: None,
+                       htlc_maximum_msat: None,
+               }])
+       };
+       // If all channels are private, return the route hint for the highest-inbound-capacity
+       // channel per counterparty node. If any channel has an inbound capacity of at least
+       // min_inbound_capacity, filter out the channels with a lower capacity than that.
+       filtered_channels.into_iter()
+               .filter(|(_counterparty_id, channel)| {
+                       min_capacity_channel_exists && channel.inbound_capacity_msat >= min_inbound_capacity ||
+                       !min_capacity_channel_exists
+               })
+               .map(|(_counterparty_id, channel)| route_hint_from_channel(&channel))
+               .collect::<Vec<RouteHint>>()
+}
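
A toy illustration of the selection rule documented above, using plain tuples rather than LDK's `ChannelDetails`: keep the highest-inbound-capacity channel per counterparty, and only apply the `min_inbound_capacity` floor when at least one channel clears it:

    use std::collections::HashMap;

    // (counterparty_id, inbound_capacity_msat) pairs stand in for ChannelDetails.
    fn pick_hint_channels(chans: &[(u8, u64)], min_inbound: u64) -> Vec<(u8, u64)> {
        let mut best: HashMap<u8, u64> = HashMap::new();
        for &(cp, cap) in chans {
            let e = best.entry(cp).or_insert(cap);
            if cap > *e { *e = cap; }
        }
        let any_clears_floor = best.values().any(|&cap| cap >= min_inbound);
        best.into_iter()
            .filter(|&(_, cap)| !any_clears_floor || cap >= min_inbound)
            .collect()
    }
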
+
 /// A [`Router`] implemented using [`find_route`].
 pub struct DefaultRouter<G: Deref<Target = NetworkGraph>, L: Deref> where L::Target: Logger {
        network_graph: G,
@@ -300,7 +468,7 @@ mod test {
        use bitcoin_hashes::sha256::Hash as Sha256;
        use lightning::chain::keysinterface::PhantomKeysManager;
        use lightning::ln::{PaymentPreimage, PaymentHash};
-       use lightning::ln::channelmanager::MIN_FINAL_CLTV_EXPIRY;
+       use lightning::ln::channelmanager::{PhantomRouteHints, MIN_FINAL_CLTV_EXPIRY};
        use lightning::ln::functional_test_utils::*;
        use lightning::ln::features::InitFeatures;
        use lightning::ln::msgs::ChannelMessageHandler;
@@ -308,8 +476,10 @@ mod test {
        use lightning::util::enforcing_trait_impls::EnforcingSigner;
        use lightning::util::events::{MessageSendEvent, MessageSendEventsProvider, Event};
        use lightning::util::test_utils;
+       use lightning::util::config::UserConfig;
        use lightning::chain::keysinterface::KeysInterface;
        use utils::create_invoice_from_channelmanager_and_duration_since_epoch;
+       use std::collections::HashSet;
 
        #[test]
        fn test_from_channelmanager() {
@@ -317,7 +487,7 @@ mod test {
                let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
                let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
                let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
-               let _chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
+               create_unannounced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001, InitFeatures::known(), InitFeatures::known());
                let invoice = create_invoice_from_channelmanager_and_duration_since_epoch(
                        &nodes[1].node, nodes[1].keys_manager, Currency::BitcoinTestnet, Some(10_000), "test".to_string(),
                        Duration::from_secs(1234567)).unwrap();
@@ -372,6 +542,185 @@ mod test {
                assert_eq!(events.len(), 2);
        }
 
+       #[test]
+       fn test_create_invoice_with_description_hash() {
+               let chanmon_cfgs = create_chanmon_cfgs(2);
+               let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+               let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+               let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+               let description_hash = crate::Sha256(Hash::hash("Testing description_hash".as_bytes()));
+               let invoice = ::utils::create_invoice_from_channelmanager_with_description_hash_and_duration_since_epoch(
+                       &nodes[1].node, nodes[1].keys_manager, Currency::BitcoinTestnet, Some(10_000),
+                       description_hash, Duration::from_secs(1234567),
+               ).unwrap();
+               assert_eq!(invoice.amount_pico_btc(), Some(100_000));
+               assert_eq!(invoice.min_final_cltv_expiry(), MIN_FINAL_CLTV_EXPIRY as u64);
+               assert_eq!(invoice.description(), InvoiceDescription::Hash(&crate::Sha256(Sha256::hash("Testing description_hash".as_bytes()))));
+       }
+
+       #[test]
+       fn test_hints_includes_single_channels_to_nodes() {
+               let chanmon_cfgs = create_chanmon_cfgs(3);
+               let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+               let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+               let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+
+               let chan_1_0 = create_unannounced_chan_between_nodes_with_value(&nodes, 1, 0, 100000, 10001, InitFeatures::known(), InitFeatures::known());
+               let chan_2_0 = create_unannounced_chan_between_nodes_with_value(&nodes, 2, 0, 100000, 10001, InitFeatures::known(), InitFeatures::known());
+
+               let mut scid_aliases = HashSet::new();
+               scid_aliases.insert(chan_1_0.0.short_channel_id_alias.unwrap());
+               scid_aliases.insert(chan_2_0.0.short_channel_id_alias.unwrap());
+
+               match_invoice_routes(Some(5000), &nodes[0], scid_aliases);
+       }
+
+       #[test]
+       fn test_hints_has_only_highest_inbound_capacity_channel() {
+               let chanmon_cfgs = create_chanmon_cfgs(2);
+               let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+               let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+               let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+               let _chan_1_0_low_inbound_capacity = create_unannounced_chan_between_nodes_with_value(&nodes, 1, 0, 100_000, 0, InitFeatures::known(), InitFeatures::known());
+               let chan_1_0_high_inbound_capacity = create_unannounced_chan_between_nodes_with_value(&nodes, 1, 0, 10_000_000, 0, InitFeatures::known(), InitFeatures::known());
+               let _chan_1_0_medium_inbound_capacity = create_unannounced_chan_between_nodes_with_value(&nodes, 1, 0, 1_000_000, 0, InitFeatures::known(), InitFeatures::known());
+
+               let mut scid_aliases = HashSet::new();
+               scid_aliases.insert(chan_1_0_high_inbound_capacity.0.short_channel_id_alias.unwrap());
+
+               match_invoice_routes(Some(5000), &nodes[0], scid_aliases);
+       }
+
+       #[test]
+       fn test_forwarding_info_not_assigned_channel_excluded_from_hints() {
+               let chanmon_cfgs = create_chanmon_cfgs(3);
+               let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+               let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+               let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+               let chan_1_0 = create_unannounced_chan_between_nodes_with_value(&nodes, 1, 0, 100000, 10001, InitFeatures::known(), InitFeatures::known());
+
+               // Create an unannounced channel between `nodes[2]` and `nodes[0]`, for which the
+               // `msgs::ChannelUpdate` is never handled by either node. As the `msgs::ChannelUpdate`
+               // is never handled, `channel.counterparty.forwarding_info` is never assigned.
+               let mut private_chan_cfg = UserConfig::default();
+               private_chan_cfg.channel_options.announced_channel = false;
+               let temporary_channel_id = nodes[2].node.create_channel(nodes[0].node.get_our_node_id(), 1_000_000, 500_000_000, 42, Some(private_chan_cfg)).unwrap();
+               let open_channel = get_event_msg!(nodes[2], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id());
+               nodes[0].node.handle_open_channel(&nodes[2].node.get_our_node_id(), InitFeatures::known(), &open_channel);
+               let accept_channel = get_event_msg!(nodes[0], MessageSendEvent::SendAcceptChannel, nodes[2].node.get_our_node_id());
+               nodes[2].node.handle_accept_channel(&nodes[0].node.get_our_node_id(), InitFeatures::known(), &accept_channel);
+
+               let tx = sign_funding_transaction(&nodes[2], &nodes[0], 1_000_000, temporary_channel_id);
+
+               let conf_height = core::cmp::max(nodes[2].best_block_info().1 + 1, nodes[0].best_block_info().1 + 1);
+               confirm_transaction_at(&nodes[2], &tx, conf_height);
+               connect_blocks(&nodes[2], CHAN_CONFIRM_DEPTH - 1);
+               confirm_transaction_at(&nodes[0], &tx, conf_height);
+               connect_blocks(&nodes[0], CHAN_CONFIRM_DEPTH - 1);
+               let as_funding_locked = get_event_msg!(nodes[2], MessageSendEvent::SendFundingLocked, nodes[0].node.get_our_node_id());
+               nodes[2].node.handle_funding_locked(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendFundingLocked, nodes[2].node.get_our_node_id()));
+               get_event_msg!(nodes[2], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id());
+               nodes[0].node.handle_funding_locked(&nodes[2].node.get_our_node_id(), &as_funding_locked);
+               get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[2].node.get_our_node_id());
+
+               // As `msgs::ChannelUpdate` was never handled for the participating node(s) of the second
+               // channel, the channel will never be assigned any `counterparty.forwarding_info`.
+               // Therefore only `chan_1_0` should be included in the hints.
+               let mut scid_aliases = HashSet::new();
+               scid_aliases.insert(chan_1_0.0.short_channel_id_alias.unwrap());
+               match_invoice_routes(Some(5000), &nodes[0], scid_aliases);
+       }
+
+       #[test]
+       fn test_no_hints_if_a_mix_between_public_and_private_channel_exists() {
+               let chanmon_cfgs = create_chanmon_cfgs(3);
+               let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+               let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+               let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+               let _chan_1_0 = create_unannounced_chan_between_nodes_with_value(&nodes, 1, 0, 100000, 10001, InitFeatures::known(), InitFeatures::known());
+
+               let chan_2_0 = create_announced_chan_between_nodes_with_value(&nodes, 2, 0, 100000, 10001, InitFeatures::known(), InitFeatures::known());
+               nodes[2].node.handle_channel_update(&nodes[0].node.get_our_node_id(), &chan_2_0.1);
+               nodes[0].node.handle_channel_update(&nodes[2].node.get_our_node_id(), &chan_2_0.0);
+
+               // Ensure that the invoice doesn't include any route hints for any of `nodes[0]`'s
+               // channels: even though all channels between `nodes[1]` and `nodes[0]` are private,
+               // there is a public channel between `nodes[2]` and `nodes[0]`.
+               match_invoice_routes(Some(5000), &nodes[0], HashSet::new());
+       }
+
+       #[test]
+       fn test_only_public_channels_includes_no_channels_in_hints() {
+               let chanmon_cfgs = create_chanmon_cfgs(3);
+               let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+               let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+               let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+               let chan_1_0 = create_announced_chan_between_nodes_with_value(&nodes, 1, 0, 100000, 10001, InitFeatures::known(), InitFeatures::known());
+               nodes[0].node.handle_channel_update(&nodes[1].node.get_our_node_id(), &chan_1_0.0);
+               nodes[1].node.handle_channel_update(&nodes[0].node.get_our_node_id(), &chan_1_0.1);
+
+               let chan_2_0 = create_announced_chan_between_nodes_with_value(&nodes, 2, 0, 100000, 10001, InitFeatures::known(), InitFeatures::known());
+               nodes[2].node.handle_channel_update(&nodes[0].node.get_our_node_id(), &chan_2_0.1);
+               nodes[0].node.handle_channel_update(&nodes[2].node.get_our_node_id(), &chan_2_0.0);
+
+               // As all of `nodes[0]`'s channels are public, no channels should be included in the hints
+               match_invoice_routes(Some(5000), &nodes[0], HashSet::new());
+       }
+
+       #[test]
+       fn test_channels_with_lower_inbound_capacity_than_invoice_amt_hints_filtering() {
+               let chanmon_cfgs = create_chanmon_cfgs(3);
+               let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+               let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+               let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+               let chan_1_0 = create_unannounced_chan_between_nodes_with_value(&nodes, 1, 0, 100_000, 0, InitFeatures::known(), InitFeatures::known());
+               let chan_2_0 = create_unannounced_chan_between_nodes_with_value(&nodes, 2, 0, 1_000_000, 0, InitFeatures::known(), InitFeatures::known());
+
+               // As the invoice amt is 1 msat above chan_1_0's inbound capacity, it shouldn't be included
+               let mut scid_aliases_99_000_001_msat = HashSet::new();
+               scid_aliases_99_000_001_msat.insert(chan_2_0.0.short_channel_id_alias.unwrap());
+
+               match_invoice_routes(Some(99_000_001), &nodes[0], scid_aliases_99_000_001_msat);
+
+               // As the invoice amt is exactly at chan_1_0's inbound capacity, it should be included
+               let mut scid_aliases_99_000_000_msat = HashSet::new();
+               scid_aliases_99_000_000_msat.insert(chan_1_0.0.short_channel_id_alias.unwrap());
+               scid_aliases_99_000_000_msat.insert(chan_2_0.0.short_channel_id_alias.unwrap());
+
+               match_invoice_routes(Some(99_000_000), &nodes[0], scid_aliases_99_000_000_msat);
+
+               // As the invoice amt is above all channels' inbound capacity, they will still be included
+               let mut scid_aliases_2_000_000_000_msat = HashSet::new();
+               scid_aliases_2_000_000_000_msat.insert(chan_1_0.0.short_channel_id_alias.unwrap());
+               scid_aliases_2_000_000_000_msat.insert(chan_2_0.0.short_channel_id_alias.unwrap());
+
+               match_invoice_routes(Some(2_000_000_000), &nodes[0], scid_aliases_2_000_000_000_msat);
+
+               // An invoice with no specified amount should include all channels in the route hints.
+               let mut scid_aliases_no_specified_amount = HashSet::new();
+               scid_aliases_no_specified_amount.insert(chan_1_0.0.short_channel_id_alias.unwrap());
+               scid_aliases_no_specified_amount.insert(chan_2_0.0.short_channel_id_alias.unwrap());
+
+               match_invoice_routes(None, &nodes[0], scid_aliases_no_specified_amount);
+       }
+
+       fn match_invoice_routes<'a, 'b: 'a, 'c: 'b>(
+               invoice_amt: Option<u64>,
+               invoice_node: &Node<'a, 'b, 'c>,
+               mut chan_ids_to_match: HashSet<u64>
+       ) {
+               let invoice = create_invoice_from_channelmanager_and_duration_since_epoch(
+                       &invoice_node.node, invoice_node.keys_manager, Currency::BitcoinTestnet, invoice_amt, "test".to_string(),
+                       Duration::from_secs(1234567)).unwrap();
+               let hints = invoice.private_routes();
+
+               for hint in hints {
+                       let hint_short_chan_id = (hint.0).0[0].short_channel_id;
+                       assert!(chan_ids_to_match.remove(&hint_short_chan_id));
+               }
+               assert!(chan_ids_to_match.is_empty(), "Unmatched short channel ids: {:?}", chan_ids_to_match);
+       }
+
        #[test]
        #[cfg(feature = "std")]
        fn test_multi_node_receive() {
@@ -489,4 +838,355 @@ mod test {
                        _ => panic!("Unexpected event")
                }
        }
+
+       #[test]
+       #[cfg(feature = "std")]
+       fn create_phantom_invoice_with_description_hash() {
+               let chanmon_cfgs = create_chanmon_cfgs(3);
+               let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+               let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+               let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+
+               let payment_amt = 20_000;
+               let (payment_hash, payment_secret) = nodes[1].node.create_inbound_payment(Some(payment_amt), 3600).unwrap();
+               let route_hints = vec![
+                       nodes[1].node.get_phantom_route_hints(),
+                       nodes[2].node.get_phantom_route_hints(),
+               ];
+
+               let description_hash = crate::Sha256(Hash::hash("Description hash phantom invoice".as_bytes()));
+               let invoice = ::utils::create_phantom_invoice_with_description_hash::<EnforcingSigner,&test_utils::TestKeysInterface>(Some(payment_amt), description_hash, payment_hash, payment_secret, route_hints, &nodes[1].keys_manager, Currency::BitcoinTestnet).unwrap();
+
+               assert_eq!(invoice.amount_pico_btc(), Some(200_000));
+               assert_eq!(invoice.min_final_cltv_expiry(), MIN_FINAL_CLTV_EXPIRY as u64);
+               assert_eq!(invoice.description(), InvoiceDescription::Hash(&crate::Sha256(Sha256::hash("Description hash phantom invoice".as_bytes()))));
+       }
+
+       #[test]
+       #[cfg(feature = "std")]
+       fn test_multi_node_hints_includes_single_channels_to_participating_nodes() {
+               let mut chanmon_cfgs = create_chanmon_cfgs(3);
+               let seed_1 = [42 as u8; 32];
+               let seed_2 = [43 as u8; 32];
+               let cross_node_seed = [44 as u8; 32];
+               chanmon_cfgs[1].keys_manager.backing = PhantomKeysManager::new(&seed_1, 43, 44, &cross_node_seed);
+               chanmon_cfgs[2].keys_manager.backing = PhantomKeysManager::new(&seed_2, 43, 44, &cross_node_seed);
+               let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+               let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+               let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+
+               let chan_0_1 = create_unannounced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001, InitFeatures::known(), InitFeatures::known());
+               let chan_0_2 = create_unannounced_chan_between_nodes_with_value(&nodes, 0, 2, 100000, 10001, InitFeatures::known(), InitFeatures::known());
+
+               let mut scid_aliases = HashSet::new();
+               scid_aliases.insert(chan_0_1.0.short_channel_id_alias.unwrap());
+               scid_aliases.insert(chan_0_2.0.short_channel_id_alias.unwrap());
+
+               match_multi_node_invoice_routes(
+                       Some(10_000),
+                       &nodes[1],
+                       vec![&nodes[1], &nodes[2],],
+                       scid_aliases,
+                       false
+               );
+       }
+
+       #[test]
+       #[cfg(feature = "std")]
+       fn test_multi_node_hints_includes_one_channel_of_each_counterparty_nodes_per_participating_node() {
+               let mut chanmon_cfgs = create_chanmon_cfgs(4);
+               let seed_1 = [42 as u8; 32];
+               let seed_2 = [43 as u8; 32];
+               let cross_node_seed = [44 as u8; 32];
+               chanmon_cfgs[2].keys_manager.backing = PhantomKeysManager::new(&seed_1, 43, 44, &cross_node_seed);
+               chanmon_cfgs[3].keys_manager.backing = PhantomKeysManager::new(&seed_2, 43, 44, &cross_node_seed);
+               let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
+               let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
+               let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
+
+               let chan_0_2 = create_unannounced_chan_between_nodes_with_value(&nodes, 0, 2, 100000, 10001, InitFeatures::known(), InitFeatures::known());
+               let chan_0_3 = create_unannounced_chan_between_nodes_with_value(&nodes, 0, 3, 1000000, 10001, InitFeatures::known(), InitFeatures::known());
+               let chan_1_3 = create_unannounced_chan_between_nodes_with_value(&nodes, 1, 3, 3_000_000, 10005, InitFeatures::known(), InitFeatures::known());
+
+               let mut scid_aliases = HashSet::new();
+               scid_aliases.insert(chan_0_2.0.short_channel_id_alias.unwrap());
+               scid_aliases.insert(chan_0_3.0.short_channel_id_alias.unwrap());
+               scid_aliases.insert(chan_1_3.0.short_channel_id_alias.unwrap());
+
+               match_multi_node_invoice_routes(
+                       Some(10_000),
+                       &nodes[2],
+                       vec![&nodes[2], &nodes[3],],
+                       scid_aliases,
+                       false
+               );
+       }
+
+       #[test]
+       #[cfg(feature = "std")]
+       fn test_multi_node_forwarding_info_not_assigned_channel_excluded_from_hints() {
+               let mut chanmon_cfgs = create_chanmon_cfgs(4);
+               let seed_1 = [42 as u8; 32];
+               let seed_2 = [43 as u8; 32];
+               let cross_node_seed = [44 as u8; 32];
+               chanmon_cfgs[2].keys_manager.backing = PhantomKeysManager::new(&seed_1, 43, 44, &cross_node_seed);
+               chanmon_cfgs[3].keys_manager.backing = PhantomKeysManager::new(&seed_2, 43, 44, &cross_node_seed);
+               let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
+               let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
+               let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
+
+               let chan_0_2 = create_unannounced_chan_between_nodes_with_value(&nodes, 0, 2, 100000, 10001, InitFeatures::known(), InitFeatures::known());
+               let chan_0_3 = create_unannounced_chan_between_nodes_with_value(&nodes, 0, 3, 1000000, 10001, InitFeatures::known(), InitFeatures::known());
+
+               // Create an unannounced channel between `nodes[1]` and `nodes[3]`, for which the
+               // `msgs::ChannelUpdate` is never handled by either node. As the `msgs::ChannelUpdate`
+               // is never handled, `channel.counterparty.forwarding_info` is never assigned.
+               let mut private_chan_cfg = UserConfig::default();
+               private_chan_cfg.channel_options.announced_channel = false;
+               let temporary_channel_id = nodes[1].node.create_channel(nodes[3].node.get_our_node_id(), 1_000_000, 500_000_000, 42, Some(private_chan_cfg)).unwrap();
+               let open_channel = get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[3].node.get_our_node_id());
+               nodes[3].node.handle_open_channel(&nodes[1].node.get_our_node_id(), InitFeatures::known(), &open_channel);
+               let accept_channel = get_event_msg!(nodes[3], MessageSendEvent::SendAcceptChannel, nodes[1].node.get_our_node_id());
+               nodes[1].node.handle_accept_channel(&nodes[3].node.get_our_node_id(), InitFeatures::known(), &accept_channel);
+
+               let tx = sign_funding_transaction(&nodes[1], &nodes[3], 1_000_000, temporary_channel_id);
+
+               let conf_height = core::cmp::max(nodes[1].best_block_info().1 + 1, nodes[3].best_block_info().1 + 1);
+               confirm_transaction_at(&nodes[1], &tx, conf_height);
+               connect_blocks(&nodes[1], CHAN_CONFIRM_DEPTH - 1);
+               confirm_transaction_at(&nodes[3], &tx, conf_height);
+               connect_blocks(&nodes[3], CHAN_CONFIRM_DEPTH - 1);
+               let as_funding_locked = get_event_msg!(nodes[1], MessageSendEvent::SendFundingLocked, nodes[3].node.get_our_node_id());
+               nodes[1].node.handle_funding_locked(&nodes[3].node.get_our_node_id(), &get_event_msg!(nodes[3], MessageSendEvent::SendFundingLocked, nodes[1].node.get_our_node_id()));
+               get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[3].node.get_our_node_id());
+               nodes[3].node.handle_funding_locked(&nodes[1].node.get_our_node_id(), &as_funding_locked);
+               get_event_msg!(nodes[3], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id());
+
+               // As `msgs::ChannelUpdate` was never handled for the participating node(s) of the third
+               // channel, the channel will never be assigned any `counterparty.forwarding_info`.
+               // Therefore only `chan_0_3` should be included in the hints for `nodes[3]`.
+               let mut scid_aliases = HashSet::new();
+               scid_aliases.insert(chan_0_2.0.short_channel_id_alias.unwrap());
+               scid_aliases.insert(chan_0_3.0.short_channel_id_alias.unwrap());
+
+               match_multi_node_invoice_routes(
+                       Some(10_000),
+                       &nodes[2],
+                       vec![&nodes[2], &nodes[3],],
+                       scid_aliases,
+                       false
+               );
+       }
+
+       #[test]
+       #[cfg(feature = "std")]
+       fn test_multi_node_with_only_public_channels_hints_includes_only_phantom_route() {
+               let mut chanmon_cfgs = create_chanmon_cfgs(3);
+               let seed_1 = [42 as u8; 32];
+               let seed_2 = [43 as u8; 32];
+               let cross_node_seed = [44 as u8; 32];
+               chanmon_cfgs[1].keys_manager.backing = PhantomKeysManager::new(&seed_1, 43, 44, &cross_node_seed);
+               chanmon_cfgs[2].keys_manager.backing = PhantomKeysManager::new(&seed_2, 43, 44, &cross_node_seed);
+               let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+               let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+               let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+
+               let chan_0_1 = create_unannounced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001, InitFeatures::known(), InitFeatures::known());
+
+               let chan_2_0 = create_announced_chan_between_nodes_with_value(&nodes, 2, 0, 100000, 10001, InitFeatures::known(), InitFeatures::known());
+               nodes[2].node.handle_channel_update(&nodes[0].node.get_our_node_id(), &chan_2_0.1);
+               nodes[0].node.handle_channel_update(&nodes[2].node.get_our_node_id(), &chan_2_0.0);
+
+               // Hints should include `chan_0_1`, as `nodes[1]` only has private channels, but not
+               // `chan_2_0`, as `nodes[2]` only has public channels.
+               let mut scid_aliases = HashSet::new();
+               scid_aliases.insert(chan_0_1.0.short_channel_id_alias.unwrap());
+
+               match_multi_node_invoice_routes(
+                       Some(10_000),
+                       &nodes[1],
+                       vec![&nodes[1], &nodes[2],],
+                       scid_aliases,
+                       true
+               );
+       }
+
+       #[test]
+       #[cfg(feature = "std")]
+       fn test_multi_node_with_mixed_public_and_private_channel_hints_includes_only_phantom_route() {
+               let mut chanmon_cfgs = create_chanmon_cfgs(4);
+               let seed_1 = [42 as u8; 32];
+               let seed_2 = [43 as u8; 32];
+               let cross_node_seed = [44 as u8; 32];
+               chanmon_cfgs[1].keys_manager.backing = PhantomKeysManager::new(&seed_1, 43, 44, &cross_node_seed);
+               chanmon_cfgs[2].keys_manager.backing = PhantomKeysManager::new(&seed_2, 43, 44, &cross_node_seed);
+               let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
+               let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
+               let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
+
+               let chan_0_2 = create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 100000, 10001, InitFeatures::known(), InitFeatures::known());
+               nodes[0].node.handle_channel_update(&nodes[2].node.get_our_node_id(), &chan_0_2.1);
+               nodes[2].node.handle_channel_update(&nodes[0].node.get_our_node_id(), &chan_0_2.0);
+               let _chan_1_2 = create_unannounced_chan_between_nodes_with_value(&nodes, 1, 2, 100000, 10001, InitFeatures::known(), InitFeatures::known());
+
+               let chan_0_3 = create_unannounced_chan_between_nodes_with_value(&nodes, 0, 3, 100000, 10001, InitFeatures::known(), InitFeatures::known());
+
+               // Hints should include `chan_0_3`, as `nodes[3]` only has private channels, but no
+               // channels for `nodes[2]`, as it has a mix of public and private channels.
+               let mut scid_aliases = HashSet::new();
+               scid_aliases.insert(chan_0_3.0.short_channel_id_alias.unwrap());
+
+               match_multi_node_invoice_routes(
+                       Some(10_000),
+                       &nodes[2],
+                       vec![&nodes[2], &nodes[3],],
+                       scid_aliases,
+                       true
+               );
+       }
+
+       #[test]
+       #[cfg(feature = "std")]
+       fn test_multi_node_hints_has_only_highest_inbound_capacity_channel() {
+               let mut chanmon_cfgs = create_chanmon_cfgs(3);
+               let seed_1 = [42 as u8; 32];
+               let seed_2 = [43 as u8; 32];
+               let cross_node_seed = [44 as u8; 32];
+               chanmon_cfgs[1].keys_manager.backing = PhantomKeysManager::new(&seed_1, 43, 44, &cross_node_seed);
+               chanmon_cfgs[2].keys_manager.backing = PhantomKeysManager::new(&seed_2, 43, 44, &cross_node_seed);
+               let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+               let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+               let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+
+               let _chan_0_1_low_inbound_capacity = create_unannounced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 0, InitFeatures::known(), InitFeatures::known());
+               let chan_0_1_high_inbound_capacity = create_unannounced_chan_between_nodes_with_value(&nodes, 0, 1, 10_000_000, 0, InitFeatures::known(), InitFeatures::known());
+               let _chan_0_1_medium_inbound_capacity = create_unannounced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 0, InitFeatures::known(), InitFeatures::known());
+               let chan_0_2 = create_unannounced_chan_between_nodes_with_value(&nodes, 0, 2, 100000, 10001, InitFeatures::known(), InitFeatures::known());
+
+               let mut scid_aliases = HashSet::new();
+               scid_aliases.insert(chan_0_1_high_inbound_capacity.0.short_channel_id_alias.unwrap());
+               scid_aliases.insert(chan_0_2.0.short_channel_id_alias.unwrap());
+
+               match_multi_node_invoice_routes(
+                       Some(10_000),
+                       &nodes[1],
+                       vec![&nodes[1], &nodes[2],],
+                       scid_aliases,
+                       false
+               );
+       }
+
+       #[test]
+       #[cfg(feature = "std")]
+       fn test_multi_node_channels_inbound_capacity_lower_than_invoice_amt_filtering() {
+               let mut chanmon_cfgs = create_chanmon_cfgs(4);
+               let seed_1 = [42 as u8; 32];
+               let seed_2 = [43 as u8; 32];
+               let cross_node_seed = [44 as u8; 32];
+               chanmon_cfgs[1].keys_manager.backing = PhantomKeysManager::new(&seed_1, 43, 44, &cross_node_seed);
+               chanmon_cfgs[2].keys_manager.backing = PhantomKeysManager::new(&seed_2, 43, 44, &cross_node_seed);
+               let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
+               let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
+               let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
+
+               let chan_0_2 = create_unannounced_chan_between_nodes_with_value(&nodes, 0, 2, 1_000_000, 0, InitFeatures::known(), InitFeatures::known());
+               let chan_0_3 = create_unannounced_chan_between_nodes_with_value(&nodes, 0, 3, 100_000, 0, InitFeatures::known(), InitFeatures::known());
+               let chan_1_3 = create_unannounced_chan_between_nodes_with_value(&nodes, 1, 3, 200_000, 0, InitFeatures::known(), InitFeatures::known());
+
+               // Since the invoice is 1 msat above chan_0_3's inbound capacity, chan_0_3 should be filtered out.
+               let mut scid_aliases_99_000_001_msat = HashSet::new();
+               scid_aliases_99_000_001_msat.insert(chan_0_2.0.short_channel_id_alias.unwrap());
+               scid_aliases_99_000_001_msat.insert(chan_1_3.0.short_channel_id_alias.unwrap());
+
+               match_multi_node_invoice_routes(
+                       Some(99_000_001),
+                       &nodes[2],
+                       vec![&nodes[2], &nodes[3],],
+                       scid_aliases_99_000_001_msat,
+                       false
+               );
+
+               // Since the invoice is exactly at chan_0_3's inbound capacity, it should be included.
+               let mut scid_aliases_99_000_000_msat = HashSet::new();
+               scid_aliases_99_000_000_msat.insert(chan_0_2.0.short_channel_id_alias.unwrap());
+               scid_aliases_99_000_000_msat.insert(chan_0_3.0.short_channel_id_alias.unwrap());
+               scid_aliases_99_000_000_msat.insert(chan_1_3.0.short_channel_id_alias.unwrap());
+
+               match_multi_node_invoice_routes(
+                       Some(99_000_000),
+                       &nodes[2],
+                       vec![&nodes[2], &nodes[3],],
+                       scid_aliases_99_000_000_msat,
+                       false
+               );
+
+               // Since the invoice is above the inbound capacity of all the participating nodes' channels,
+               // all of the channels should be included.
+               let mut scid_aliases_300_000_000_msat = HashSet::new();
+               scid_aliases_300_000_000_msat.insert(chan_0_2.0.short_channel_id_alias.unwrap());
+               scid_aliases_300_000_000_msat.insert(chan_0_3.0.short_channel_id_alias.unwrap());
+               scid_aliases_300_000_000_msat.insert(chan_1_3.0.short_channel_id_alias.unwrap());
+
+               match_multi_node_invoice_routes(
+                       Some(300_000_000),
+                       &nodes[2],
+                       vec![&nodes[2], &nodes[3],],
+                       scid_aliases_300_000_000_msat,
+                       false
+               );
+
+               // Since no amount is specified in the invoice, all channels should be included.
+               let mut scid_aliases_no_specified_amount = HashSet::new();
+               scid_aliases_no_specified_amount.insert(chan_0_2.0.short_channel_id_alias.unwrap());
+               scid_aliases_no_specified_amount.insert(chan_0_3.0.short_channel_id_alias.unwrap());
+               scid_aliases_no_specified_amount.insert(chan_1_3.0.short_channel_id_alias.unwrap());
+
+               match_multi_node_invoice_routes(
+                       None,
+                       &nodes[2],
+                       vec![&nodes[2], &nodes[3],],
+                       scid_aliases_no_specified_amount,
+                       false
+               );
+       }
+
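+       // Helper which creates a phantom invoice for `invoice_node` spanning `network_multi_nodes`, then
+       // asserts that every private route hint in the invoice is either a lone phantom hop (only allowed
+       // when `nodes_contains_public_channels` is set) or one of the expected SCID aliases followed by a
+       // phantom hop, and that every alias in `chan_ids_to_match` is used exactly once.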
+       #[cfg(feature = "std")]
+       fn match_multi_node_invoice_routes<'a, 'b: 'a, 'c: 'b>(
+               invoice_amt: Option<u64>,
+               invoice_node: &Node<'a, 'b, 'c>,
+               network_multi_nodes: Vec<&Node<'a, 'b, 'c>>,
+               mut chan_ids_to_match: HashSet<u64>,
+               nodes_contains_public_channels: bool
+       ){
+               let (payment_hash, payment_secret) = invoice_node.node.create_inbound_payment(invoice_amt, 3600).unwrap();
+               let phantom_route_hints = network_multi_nodes.iter()
+                       .map(|node| node.node.get_phantom_route_hints())
+                       .collect::<Vec<PhantomRouteHints>>();
+               let phantom_scids = phantom_route_hints.iter()
+                       .map(|route_hint| route_hint.phantom_scid)
+                       .collect::<HashSet<u64>>();
+
+               let invoice = ::utils::create_phantom_invoice::<EnforcingSigner, &test_utils::TestKeysInterface>(invoice_amt, "test".to_string(), payment_hash, payment_secret, phantom_route_hints, &invoice_node.keys_manager, Currency::BitcoinTestnet).unwrap();
+
+               let invoice_hints = invoice.private_routes();
+
+               for hint in invoice_hints {
+                       let hints = &(hint.0).0;
+                       match hints.len() {
+                               1 => {
+                                       assert!(nodes_contains_public_channels);
+                                       let phantom_scid = hints[0].short_channel_id;
+                                       assert!(phantom_scids.contains(&phantom_scid));
+                               },
+                               2 => {
+                                       let hint_short_chan_id = hints[0].short_channel_id;
+                                       assert!(chan_ids_to_match.remove(&hint_short_chan_id));
+                                       let phantom_scid = hints[1].short_channel_id;
+                                       assert!(phantom_scids.contains(&phantom_scid));
+                               },
+                               _ => panic!("Incorrect hint length generated")
+                       }
+               }
+               assert!(chan_ids_to_match.is_empty(), "Unmatched short channel ids: {:?}", chan_ids_to_match);
+       }
 }
index 2582cc597f20d361906271811e8d523f5977516e..a9fd861bc846e440fd8ff54ab31deedf622aa8e6 100644 (file)
@@ -81,10 +81,11 @@ use tokio::io::{AsyncReadExt, AsyncWrite, AsyncWriteExt};
 use lightning::ln::peer_handler;
 use lightning::ln::peer_handler::SocketDescriptor as LnSocketTrait;
 use lightning::ln::peer_handler::CustomMessageHandler;
-use lightning::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler};
+use lightning::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, NetAddress};
 use lightning::util::logger::Logger;
 
 use std::task;
+use std::net::IpAddr;
 use std::net::SocketAddr;
 use std::net::TcpStream as StdTcpStream;
 use std::sync::{Arc, Mutex};
@@ -222,11 +223,21 @@ pub fn setup_inbound<CMH, RMH, L, UMH>(peer_manager: Arc<peer_handler::PeerManag
                RMH: RoutingMessageHandler + 'static + Send + Sync,
                L: Logger + 'static + ?Sized + Send + Sync,
                UMH: CustomMessageHandler + 'static + Send + Sync {
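+       // Grab the peer's socket address up front so it can be handed to the PeerManager as the
+       // peer's remote network address.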
+       let ip_addr = stream.peer_addr().unwrap();
        let (reader, write_receiver, read_receiver, us) = Connection::new(stream);
        #[cfg(debug_assertions)]
        let last_us = Arc::clone(&us);
 
-       let handle_opt = if let Ok(_) = peer_manager.new_inbound_connection(SocketDescriptor::new(us.clone())) {
+       let handle_opt = if let Ok(_) = peer_manager.new_inbound_connection(SocketDescriptor::new(us.clone()), match ip_addr.ip() {
+               IpAddr::V4(ip) => Some(NetAddress::IPv4 {
+                       addr: ip.octets(),
+                       port: ip_addr.port(),
+               }),
+               IpAddr::V6(ip) => Some(NetAddress::IPv6 {
+                       addr: ip.octets(),
+                       port: ip_addr.port(),
+               }),
+       }) {
                Some(tokio::spawn(Connection::schedule_read(peer_manager, us, reader, read_receiver, write_receiver)))
        } else {
                // Note that we will skip socket_disconnected here, in accordance with the PeerManager
@@ -263,11 +274,20 @@ pub fn setup_outbound<CMH, RMH, L, UMH>(peer_manager: Arc<peer_handler::PeerMana
                RMH: RoutingMessageHandler + 'static + Send + Sync,
                L: Logger + 'static + ?Sized + Send + Sync,
                UMH: CustomMessageHandler + 'static + Send + Sync {
+       let ip_addr = stream.peer_addr().unwrap();
        let (reader, mut write_receiver, read_receiver, us) = Connection::new(stream);
        #[cfg(debug_assertions)]
        let last_us = Arc::clone(&us);
-
-       let handle_opt = if let Ok(initial_send) = peer_manager.new_outbound_connection(their_node_id, SocketDescriptor::new(us.clone())) {
+       let handle_opt = if let Ok(initial_send) = peer_manager.new_outbound_connection(their_node_id, SocketDescriptor::new(us.clone()), match ip_addr.ip() {
+               IpAddr::V4(ip) => Some(NetAddress::IPv4 {
+                       addr: ip.octets(),
+                       port: ip_addr.port(),
+               }),
+               IpAddr::V6(ip) => Some(NetAddress::IPv6 {
+                       addr: ip.octets(),
+                       port: ip_addr.port(),
+               }),
+       }) {
                Some(tokio::spawn(async move {
                        // We should essentially always have enough room in a TCP socket buffer to send the
                        // initial 10s of bytes. However, tokio running in single-threaded mode will always
@@ -496,7 +516,7 @@ mod tests {
                fn handle_channel_update(&self, _msg: &ChannelUpdate) -> Result<bool, LightningError> { Ok(false) }
                fn get_next_channel_announcements(&self, _starting_point: u64, _batch_amount: u8) -> Vec<(ChannelAnnouncement, Option<ChannelUpdate>, Option<ChannelUpdate>)> { Vec::new() }
                fn get_next_node_announcements(&self, _starting_point: Option<&PublicKey>, _batch_amount: u8) -> Vec<NodeAnnouncement> { Vec::new() }
-               fn sync_routing_table(&self, _their_node_id: &PublicKey, _init_msg: &Init) { }
+               fn peer_connected(&self, _their_node_id: &PublicKey, _init_msg: &Init) { }
                fn handle_reply_channel_range(&self, _their_node_id: &PublicKey, _msg: ReplyChannelRange) -> Result<(), LightningError> { Ok(()) }
                fn handle_reply_short_channel_ids_end(&self, _their_node_id: &PublicKey, _msg: ReplyShortChannelIdsEnd) -> Result<(), LightningError> { Ok(()) }
                fn handle_query_channel_range(&self, _their_node_id: &PublicKey, _msg: QueryChannelRange) -> Result<(), LightningError> { Ok(()) }
index f38bb80511189ccaf92cce9bbb4a734949ff0e3b..2b0c2d16b7a3bf9f108157ad981fc1893dab6107 100644 (file)
@@ -32,7 +32,10 @@ _bench_unstable = []
 no-std = ["hashbrown", "bitcoin/no-std", "core2/alloc"]
 std = ["bitcoin/std"]
 
-default = ["std"]
+# Generates low-r bitcoin signatures, which saves 1 byte in 50% of the cases
+grind_signatures = []
+
+default = ["std", "grind_signatures"]
 
 [dependencies]
 bitcoin = { version = "0.27", default-features = false, features = ["secp-recovery"] }
index 1daeec4ef62354a1fb9f4a5597ad7678a270ffe3..be31036220a5a720e27d561697aad4a5daa7869c 100644 (file)
@@ -31,7 +31,7 @@ use bitcoin::secp256k1::recovery::RecoverableSignature;
 use bitcoin::secp256k1;
 
 use util::{byte_utils, transaction_utils};
-use util::crypto::hkdf_extract_expand_twice;
+use util::crypto::{hkdf_extract_expand_twice, sign};
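+// Note: `sign` wraps ECDSA signing and, when the `grind_signatures` feature is enabled (see
+// Cargo.toml), grinds for low-r signatures to save a byte in roughly half of all signatures.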
 use util::ser::{Writeable, Writer, Readable, ReadableArgs};
 
 use chain::transaction::OutPoint;
@@ -590,7 +590,7 @@ impl InMemorySigner {
                let remotepubkey = self.pubkeys().payment_point;
                let witness_script = bitcoin::Address::p2pkh(&::bitcoin::PublicKey{compressed: true, key: remotepubkey}, Network::Testnet).script_pubkey();
                let sighash = hash_to_message!(&bip143::SigHashCache::new(spend_tx).signature_hash(input_idx, &witness_script, descriptor.output.value, SigHashType::All)[..]);
-               let remotesig = secp_ctx.sign(&sighash, &self.payment_key);
+               let remotesig = sign(secp_ctx, &sighash, &self.payment_key);
                let payment_script = bitcoin::Address::p2wpkh(&::bitcoin::PublicKey{compressed: true, key: remotepubkey}, Network::Bitcoin).unwrap().script_pubkey();
 
                if payment_script != descriptor.output.script_pubkey  { return Err(()); }
@@ -624,7 +624,7 @@ impl InMemorySigner {
                let delayed_payment_pubkey = PublicKey::from_secret_key(&secp_ctx, &delayed_payment_key);
                let witness_script = chan_utils::get_revokeable_redeemscript(&descriptor.revocation_pubkey, descriptor.to_self_delay, &delayed_payment_pubkey);
                let sighash = hash_to_message!(&bip143::SigHashCache::new(spend_tx).signature_hash(input_idx, &witness_script, descriptor.output.value, SigHashType::All)[..]);
-               let local_delayedsig = secp_ctx.sign(&sighash, &delayed_payment_key);
+               let local_delayedsig = sign(secp_ctx, &sighash, &delayed_payment_key);
                let payment_script = bitcoin::Address::p2wsh(&witness_script, Network::Bitcoin).script_pubkey();
 
                if descriptor.output.script_pubkey != payment_script { return Err(()); }
@@ -673,7 +673,7 @@ impl BaseSign for InMemorySigner {
                        let htlc_sighashtype = if self.opt_anchors() { SigHashType::SinglePlusAnyoneCanPay } else { SigHashType::All };
                        let htlc_sighash = hash_to_message!(&bip143::SigHashCache::new(&htlc_tx).signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype)[..]);
                        let holder_htlc_key = chan_utils::derive_private_key(&secp_ctx, &keys.per_commitment_point, &self.htlc_base_key).map_err(|_| ())?;
-                       htlc_sigs.push(secp_ctx.sign(&htlc_sighash, &holder_htlc_key));
+                       htlc_sigs.push(sign(secp_ctx, &htlc_sighash, &holder_htlc_key));
                }
 
                Ok((commitment_sig, htlc_sigs))
@@ -714,7 +714,7 @@ impl BaseSign for InMemorySigner {
                };
                let mut sighash_parts = bip143::SigHashCache::new(justice_tx);
                let sighash = hash_to_message!(&sighash_parts.signature_hash(input, &witness_script, amount, SigHashType::All)[..]);
-               return Ok(secp_ctx.sign(&sighash, &revocation_key))
+               return Ok(sign(secp_ctx, &sighash, &revocation_key))
        }
 
        fn sign_justice_revoked_htlc(&self, justice_tx: &Transaction, input: usize, amount: u64, per_commitment_key: &SecretKey, htlc: &HTLCOutputInCommitment, secp_ctx: &Secp256k1<secp256k1::All>) -> Result<Signature, ()> {
@@ -728,7 +728,7 @@ impl BaseSign for InMemorySigner {
                };
                let mut sighash_parts = bip143::SigHashCache::new(justice_tx);
                let sighash = hash_to_message!(&sighash_parts.signature_hash(input, &witness_script, amount, SigHashType::All)[..]);
-               return Ok(secp_ctx.sign(&sighash, &revocation_key))
+               return Ok(sign(secp_ctx, &sighash, &revocation_key))
        }
 
        fn sign_counterparty_htlc_transaction(&self, htlc_tx: &Transaction, input: usize, amount: u64, per_commitment_point: &PublicKey, htlc: &HTLCOutputInCommitment, secp_ctx: &Secp256k1<secp256k1::All>) -> Result<Signature, ()> {
@@ -742,7 +742,7 @@ impl BaseSign for InMemorySigner {
                        } else { return Err(()) };
                        let mut sighash_parts = bip143::SigHashCache::new(htlc_tx);
                        let sighash = hash_to_message!(&sighash_parts.signature_hash(input, &witness_script, amount, SigHashType::All)[..]);
-                       return Ok(secp_ctx.sign(&sighash, &htlc_key))
+                       return Ok(sign(secp_ctx, &sighash, &htlc_key))
                }
                Err(())
        }
@@ -756,7 +756,7 @@ impl BaseSign for InMemorySigner {
        fn sign_channel_announcement(&self, msg: &UnsignedChannelAnnouncement, secp_ctx: &Secp256k1<secp256k1::All>)
        -> Result<(Signature, Signature), ()> {
                let msghash = hash_to_message!(&Sha256dHash::hash(&msg.encode()[..])[..]);
-               Ok((secp_ctx.sign(&msghash, &self.node_secret), secp_ctx.sign(&msghash, &self.funding_key)))
+               Ok((sign(secp_ctx, &msghash, &self.node_secret), sign(secp_ctx, &msghash, &self.funding_key)))
        }
 
        fn ready_channel(&mut self, channel_parameters: &ChannelTransactionParameters) {
@@ -1102,7 +1102,7 @@ impl KeysManager {
                                        if payment_script != output.script_pubkey { return Err(()); };
 
                                        let sighash = hash_to_message!(&bip143::SigHashCache::new(&spend_tx).signature_hash(input_idx, &witness_script, output.value, SigHashType::All)[..]);
-                                       let sig = secp_ctx.sign(&sighash, &secret.private_key.key);
+                                       let sig = sign(secp_ctx, &sighash, &secret.private_key.key);
                                        spend_tx.input[input_idx].witness.push(sig.serialize_der().to_vec());
                                        spend_tx.input[input_idx].witness[0].push(SigHashType::All as u8);
                                        spend_tx.input[input_idx].witness.push(pubkey.key.serialize().to_vec());
index 7ee5ee521bc55e37fe578180c25f8d58e647bf53..b31ceacea15852def4203b037d8e36216094f5c4 100644 (file)
@@ -43,30 +43,90 @@ impl Condvar {
 }
 
 thread_local! {
-       /// We track the set of locks currently held by a reference to their `MutexMetadata`
-       static MUTEXES_HELD: RefCell<HashSet<Arc<MutexMetadata>>> = RefCell::new(HashSet::new());
+       /// We track the set of locks currently held by a reference to their `LockMetadata`
+       static LOCKS_HELD: RefCell<HashSet<Arc<LockMetadata>>> = RefCell::new(HashSet::new());
 }
-static MUTEX_IDX: AtomicUsize = AtomicUsize::new(0);
+static LOCK_IDX: AtomicUsize = AtomicUsize::new(0);
 
-/// Metadata about a single mutex, by id, the set of things locked-before it, and the backtrace of
+/// Metadata about a single lock, by id, the set of things locked-before it, and the backtrace of
 /// when the Mutex itself was constructed.
-struct MutexMetadata {
-       mutex_idx: u64,
-       locked_before: StdMutex<HashSet<Arc<MutexMetadata>>>,
+struct LockMetadata {
+       lock_idx: u64,
+       locked_before: StdMutex<HashSet<Arc<LockMetadata>>>,
        #[cfg(feature = "backtrace")]
-       mutex_construction_bt: Backtrace,
+       lock_construction_bt: Backtrace,
 }
-impl PartialEq for MutexMetadata {
-       fn eq(&self, o: &MutexMetadata) -> bool { self.mutex_idx == o.mutex_idx }
+impl PartialEq for LockMetadata {
+       fn eq(&self, o: &LockMetadata) -> bool { self.lock_idx == o.lock_idx }
 }
-impl Eq for MutexMetadata {}
-impl std::hash::Hash for MutexMetadata {
-       fn hash<H: std::hash::Hasher>(&self, hasher: &mut H) { hasher.write_u64(self.mutex_idx); }
+impl Eq for LockMetadata {}
+impl std::hash::Hash for LockMetadata {
+       fn hash<H: std::hash::Hasher>(&self, hasher: &mut H) { hasher.write_u64(self.lock_idx); }
+}
+
+impl LockMetadata {
+       fn new() -> LockMetadata {
+               LockMetadata {
+                       locked_before: StdMutex::new(HashSet::new()),
+                       lock_idx: LOCK_IDX.fetch_add(1, Ordering::Relaxed) as u64,
+                       #[cfg(feature = "backtrace")]
+                       lock_construction_bt: Backtrace::new(),
+               }
+       }
+
+       // Returns true unless this is a recursive read lock (i.e. the lock is already held by this
+       // thread); only relevant for read locks.
+       fn _pre_lock(this: &Arc<LockMetadata>, read: bool) -> bool {
+               let mut inserted = false;
+               LOCKS_HELD.with(|held| {
+                       // For each lock which is currently locked, check that no lock's locked-before
+                       // set includes the lock we're about to lock, which would imply a lockorder
+                       // inversion.
+                       for locked in held.borrow().iter() {
+                               if read && *locked == *this {
+                                       // Recursive read locks are explicitly allowed
+                                       return;
+                               }
+                       }
+                       for locked in held.borrow().iter() {
+                               if !read && *locked == *this {
+                                       panic!("Tried to lock a lock while it was held!");
+                               }
+                               for locked_dep in locked.locked_before.lock().unwrap().iter() {
+                                       if *locked_dep == *this {
+                                               #[cfg(feature = "backtrace")]
+                                               panic!("Tried to violate existing lockorder.\nMutex that should be locked after the current lock was created at the following backtrace.\nNote that to get a backtrace for the lockorder violation, you should set RUST_BACKTRACE=1\n{:?}", locked.lock_construction_bt);
+                                               #[cfg(not(feature = "backtrace"))]
+                                               panic!("Tried to violate existing lockorder. Build with the backtrace feature for more info.");
+                                       }
+                               }
+                               // Insert any already-held locks in our locked-before set.
+                               this.locked_before.lock().unwrap().insert(Arc::clone(locked));
+                       }
+                       held.borrow_mut().insert(Arc::clone(this));
+                       inserted = true;
+               });
+               inserted
+       }
+
+       fn pre_lock(this: &Arc<LockMetadata>) { Self::_pre_lock(this, false); }
+       fn pre_read_lock(this: &Arc<LockMetadata>) -> bool { Self::_pre_lock(this, true) }
+
+       fn try_locked(this: &Arc<LockMetadata>) {
+               LOCKS_HELD.with(|held| {
+                       // Since a try-lock will simply fail if the lock is held already, we do not
+                       // consider try-locks to ever generate lockorder inversions. However, if a try-lock
+                       // succeeds, we do consider it to have created lockorder dependencies.
+                       for locked in held.borrow().iter() {
+                               this.locked_before.lock().unwrap().insert(Arc::clone(locked));
+                       }
+                       held.borrow_mut().insert(Arc::clone(this));
+               });
+       }
 }
 
 pub struct Mutex<T: Sized> {
        inner: StdMutex<T>,
-       deps: Arc<MutexMetadata>,
+       deps: Arc<LockMetadata>,
 }
 
 #[must_use = "if unused the Mutex will immediately unlock"]
@@ -88,7 +148,7 @@ impl<'a, T: Sized> MutexGuard<'a, T> {
 
 impl<T: Sized> Drop for MutexGuard<'_, T> {
        fn drop(&mut self) {
-               MUTEXES_HELD.with(|held| {
+               LOCKS_HELD.with(|held| {
                        held.borrow_mut().remove(&self.mutex.deps);
                });
        }
@@ -110,104 +170,195 @@ impl<T: Sized> DerefMut for MutexGuard<'_, T> {
 
 impl<T> Mutex<T> {
        pub fn new(inner: T) -> Mutex<T> {
-               Mutex {
-                       inner: StdMutex::new(inner),
-                       deps: Arc::new(MutexMetadata {
-                               locked_before: StdMutex::new(HashSet::new()),
-                               mutex_idx: MUTEX_IDX.fetch_add(1, Ordering::Relaxed) as u64,
-                               #[cfg(feature = "backtrace")]
-                               mutex_construction_bt: Backtrace::new(),
-                       }),
-               }
+               Mutex { inner: StdMutex::new(inner), deps: Arc::new(LockMetadata::new()) }
        }
 
        pub fn lock<'a>(&'a self) -> LockResult<MutexGuard<'a, T>> {
-               MUTEXES_HELD.with(|held| {
-                       // For each mutex which is currently locked, check that no mutex's locked-before
-                       // set includes the mutex we're about to lock, which would imply a lockorder
-                       // inversion.
-                       for locked in held.borrow().iter() {
-                               for locked_dep in locked.locked_before.lock().unwrap().iter() {
-                                       if *locked_dep == self.deps {
-                                               #[cfg(feature = "backtrace")]
-                                               panic!("Tried to violate existing lockorder.\nMutex that should be locked after the current lock was created at the following backtrace.\nNote that to get a backtrace for the lockorder violation, you should set RUST_BACKTRACE=1\n{:?}", locked.mutex_construction_bt);
-                                               #[cfg(not(feature = "backtrace"))]
-                                               panic!("Tried to violate existing lockorder. Build with the backtrace feature for more info.");
-                                       }
-                               }
-                               // Insert any already-held mutexes in our locked-before set.
-                               self.deps.locked_before.lock().unwrap().insert(Arc::clone(locked));
-                       }
-                       held.borrow_mut().insert(Arc::clone(&self.deps));
-               });
+               LockMetadata::pre_lock(&self.deps);
                self.inner.lock().map(|lock| MutexGuard { mutex: self, lock }).map_err(|_| ())
        }
 
        pub fn try_lock<'a>(&'a self) -> LockResult<MutexGuard<'a, T>> {
                let res = self.inner.try_lock().map(|lock| MutexGuard { mutex: self, lock }).map_err(|_| ());
                if res.is_ok() {
-                       MUTEXES_HELD.with(|held| {
-                               // Since a try-lock will simply fail if the lock is held already, we do not
-                               // consider try-locks to ever generate lockorder inversions. However, if a try-lock
-                               // succeeds, we do consider it to have created lockorder dependencies.
-                               for locked in held.borrow().iter() {
-                                       self.deps.locked_before.lock().unwrap().insert(Arc::clone(locked));
-                               }
-                               held.borrow_mut().insert(Arc::clone(&self.deps));
-                       });
+                       LockMetadata::try_locked(&self.deps);
                }
                res
        }
 }
 
-pub struct RwLock<T: ?Sized> {
-       inner: StdRwLock<T>
+pub struct RwLock<T: Sized> {
+       inner: StdRwLock<T>,
+       deps: Arc<LockMetadata>,
 }
 
-pub struct RwLockReadGuard<'a, T: ?Sized + 'a> {
-       lock: StdRwLockReadGuard<'a, T>,
+pub struct RwLockReadGuard<'a, T: Sized + 'a> {
+       lock: &'a RwLock<T>,
+       first_lock: bool,
+       guard: StdRwLockReadGuard<'a, T>,
 }
 
-pub struct RwLockWriteGuard<'a, T: ?Sized + 'a> {
-       lock: StdRwLockWriteGuard<'a, T>,
+pub struct RwLockWriteGuard<'a, T: Sized + 'a> {
+       lock: &'a RwLock<T>,
+       guard: StdRwLockWriteGuard<'a, T>,
 }
 
-impl<T: ?Sized> Deref for RwLockReadGuard<'_, T> {
+impl<T: Sized> Deref for RwLockReadGuard<'_, T> {
        type Target = T;
 
        fn deref(&self) -> &T {
-               &self.lock.deref()
+               &self.guard.deref()
        }
 }
 
-impl<T: ?Sized> Deref for RwLockWriteGuard<'_, T> {
+impl<T: Sized> Drop for RwLockReadGuard<'_, T> {
+       fn drop(&mut self) {
+               if !self.first_lock {
+                       // Note that it's not strictly true that the first taken read lock will get unlocked
+                       // last, but in practice our locks are always taken as RAII, so it should basically
+                       // always be true.
+                       return;
+               }
+               LOCKS_HELD.with(|held| {
+                       held.borrow_mut().remove(&self.lock.deps);
+               });
+       }
+}
+
+impl<T: Sized> Deref for RwLockWriteGuard<'_, T> {
        type Target = T;
 
        fn deref(&self) -> &T {
-               &self.lock.deref()
+               &self.guard.deref()
+       }
+}
+
+impl<T: Sized> Drop for RwLockWriteGuard<'_, T> {
+       fn drop(&mut self) {
+               LOCKS_HELD.with(|held| {
+                       held.borrow_mut().remove(&self.lock.deps);
+               });
        }
 }
 
-impl<T: ?Sized> DerefMut for RwLockWriteGuard<'_, T> {
+impl<T: Sized> DerefMut for RwLockWriteGuard<'_, T> {
        fn deref_mut(&mut self) -> &mut T {
-               self.lock.deref_mut()
+               self.guard.deref_mut()
        }
 }
 
 impl<T> RwLock<T> {
        pub fn new(inner: T) -> RwLock<T> {
-               RwLock { inner: StdRwLock::new(inner) }
+               RwLock { inner: StdRwLock::new(inner), deps: Arc::new(LockMetadata::new()) }
        }
 
        pub fn read<'a>(&'a self) -> LockResult<RwLockReadGuard<'a, T>> {
-               self.inner.read().map(|lock| RwLockReadGuard { lock }).map_err(|_| ())
+               let first_lock = LockMetadata::pre_read_lock(&self.deps);
+               self.inner.read().map(|guard| RwLockReadGuard { lock: self, guard, first_lock }).map_err(|_| ())
        }
 
        pub fn write<'a>(&'a self) -> LockResult<RwLockWriteGuard<'a, T>> {
-               self.inner.write().map(|lock| RwLockWriteGuard { lock }).map_err(|_| ())
+               LockMetadata::pre_lock(&self.deps);
+               self.inner.write().map(|guard| RwLockWriteGuard { lock: self, guard }).map_err(|_| ())
        }
 
        pub fn try_write<'a>(&'a self) -> LockResult<RwLockWriteGuard<'a, T>> {
-               self.inner.try_write().map(|lock| RwLockWriteGuard { lock }).map_err(|_| ())
+               let res = self.inner.try_write().map(|guard| RwLockWriteGuard { lock: self, guard }).map_err(|_| ());
+               if res.is_ok() {
+                       LockMetadata::try_locked(&self.deps);
+               }
+               res
+       }
+}
+
+#[test]
+#[should_panic]
+fn recursive_lock_fail() {
+       let mutex = Mutex::new(());
+       let _a = mutex.lock().unwrap();
+       let _b = mutex.lock().unwrap();
+}
+
+#[test]
+fn recursive_read() {
+       let lock = RwLock::new(());
+       let _a = lock.read().unwrap();
+       let _b = lock.read().unwrap();
+}
+
+#[test]
+#[should_panic]
+fn lockorder_fail() {
+       let a = Mutex::new(());
+       let b = Mutex::new(());
+       {
+               let _a = a.lock().unwrap();
+               let _b = b.lock().unwrap();
+       }
+       {
+               let _b = b.lock().unwrap();
+               let _a = a.lock().unwrap();
+       }
+}
+
+#[test]
+#[should_panic]
+fn write_lockorder_fail() {
+       let a = RwLock::new(());
+       let b = RwLock::new(());
+       {
+               let _a = a.write().unwrap();
+               let _b = b.write().unwrap();
+       }
+       {
+               let _b = b.write().unwrap();
+               let _a = a.write().unwrap();
+       }
+}
+
+#[test]
+#[should_panic]
+fn read_lockorder_fail() {
+       let a = RwLock::new(());
+       let b = RwLock::new(());
+       {
+               let _a = a.read().unwrap();
+               let _b = b.read().unwrap();
+       }
+       {
+               let _b = b.read().unwrap();
+               let _a = a.read().unwrap();
+       }
+}
+
+#[test]
+fn read_recursive_no_lockorder() {
+       // Like the above, but note that no lockorder is implied when we recursively read-lock a
+       // RwLock, causing this to pass just fine.
+       let a = RwLock::new(());
+       let b = RwLock::new(());
+       let _outer = a.read().unwrap();
+       {
+               let _a = a.read().unwrap();
+               let _b = b.read().unwrap();
+       }
+       {
+               let _b = b.read().unwrap();
+               let _a = a.read().unwrap();
+       }
+}
+
+#[test]
+#[should_panic]
+fn read_write_lockorder_fail() {
+       let a = RwLock::new(());
+       let b = RwLock::new(());
+       {
+               let _a = a.write().unwrap();
+               let _b = b.read().unwrap();
+       }
+       {
+               let _b = b.read().unwrap();
+               let _a = a.write().unwrap();
        }
 }
index 6dc05c2ef3c09e2fcd5fc16ad8a2ebe2427343d3..370c0cc8edfe6737f3f1d65f5dab59ea688ee854 100644 (file)
@@ -39,6 +39,7 @@ use util::transaction_utils::sort_outputs;
 use ln::channel::{INITIAL_COMMITMENT_NUMBER, ANCHOR_OUTPUT_VALUE_SATOSHI};
 use core::ops::Deref;
 use chain;
+use util::crypto::sign;
 
 pub(crate) const MAX_HTLCS: u16 = 483;
 
@@ -841,7 +842,7 @@ impl HolderCommitmentTransaction {
        pub fn dummy() -> Self {
                let secp_ctx = Secp256k1::new();
                let dummy_key = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
-               let dummy_sig = secp_ctx.sign(&secp256k1::Message::from_slice(&[42; 32]).unwrap(), &SecretKey::from_slice(&[42; 32]).unwrap());
+               let dummy_sig = sign(&secp_ctx, &secp256k1::Message::from_slice(&[42; 32]).unwrap(), &SecretKey::from_slice(&[42; 32]).unwrap());
 
                let keys = TxCreationKeys {
                        per_commitment_point: dummy_key.clone(),
@@ -936,7 +937,7 @@ impl BuiltCommitmentTransaction {
        /// because we are about to broadcast a holder transaction.
        pub fn sign<T: secp256k1::Signing>(&self, funding_key: &SecretKey, funding_redeemscript: &Script, channel_value_satoshis: u64, secp_ctx: &Secp256k1<T>) -> Signature {
                let sighash = self.get_sighash_all(funding_redeemscript, channel_value_satoshis);
-               secp_ctx.sign(&sighash, funding_key)
+               sign(secp_ctx, &sighash, funding_key)
        }
 }
 
@@ -1060,7 +1061,7 @@ impl<'a> TrustedClosingTransaction<'a> {
        /// because we are about to broadcast a holder transaction.
        pub fn sign<T: secp256k1::Signing>(&self, funding_key: &SecretKey, funding_redeemscript: &Script, channel_value_satoshis: u64, secp_ctx: &Secp256k1<T>) -> Signature {
                let sighash = self.get_sighash_all(funding_redeemscript, channel_value_satoshis);
-               secp_ctx.sign(&sighash, funding_key)
+               sign(secp_ctx, &sighash, funding_key)
        }
 }
 
@@ -1415,7 +1416,7 @@ impl<'a> TrustedCommitmentTransaction<'a> {
                        let htlc_redeemscript = get_htlc_redeemscript_with_explicit_keys(&this_htlc, self.opt_anchors(), &keys.broadcaster_htlc_key, &keys.countersignatory_htlc_key, &keys.revocation_key);
 
                        let sighash = hash_to_message!(&bip143::SigHashCache::new(&htlc_tx).signature_hash(0, &htlc_redeemscript, this_htlc.amount_msat / 1000, SigHashType::All)[..]);
-                       ret.push(secp_ctx.sign(&sighash, &holder_htlc_key));
+                       ret.push(sign(secp_ctx, &sighash, &holder_htlc_key));
                }
                Ok(ret)
        }
index a6c5649c1d954e5b76f7efe8e28dd675d9d76c31..94af00c7c72e8eebd3263a0a16e50a93c2e32cd7 100644 (file)
@@ -16,7 +16,7 @@ use bitcoin::blockdata::block::{Block, BlockHeader};
 use bitcoin::blockdata::constants::genesis_block;
 use bitcoin::hash_types::BlockHash;
 use bitcoin::network::constants::Network;
-use chain::channelmonitor::ChannelMonitor;
+use chain::channelmonitor::{ANTI_REORG_DELAY, ChannelMonitor};
 use chain::transaction::OutPoint;
 use chain::{ChannelMonitorUpdateErr, Listen, Watch};
 use ln::channelmanager::{ChannelManager, ChannelManagerReadArgs, RAACommitmentOrder, PaymentSendFailure};
@@ -334,10 +334,10 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) {
                nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
                nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
 
-               nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
+               nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
                let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
                assert_eq!(reestablish_1.len(), 1);
-               nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
+               nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
                let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
                assert_eq!(reestablish_2.len(), 1);
 
@@ -356,10 +356,10 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) {
                assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
                assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
 
-               nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
+               nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
                let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
                assert_eq!(reestablish_1.len(), 1);
-               nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
+               nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
                let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
                assert_eq!(reestablish_2.len(), 1);
 
@@ -1108,8 +1108,8 @@ fn test_monitor_update_fail_reestablish() {
        commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false);
 
        chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
-       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
-       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
+       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
+       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
 
        let as_reestablish = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id());
        let bs_reestablish = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
@@ -1127,8 +1127,8 @@ fn test_monitor_update_fail_reestablish() {
        nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
        nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
 
-       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
-       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
+       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
+       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
 
        assert!(as_reestablish == get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id()));
        assert!(bs_reestablish == get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id()));
@@ -1300,8 +1300,8 @@ fn claim_while_disconnected_monitor_update_fail() {
        assert!(nodes[1].node.claim_funds(payment_preimage_1));
        check_added_monitors!(nodes[1], 1);
 
-       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
-       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
+       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
+       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
 
        let as_reconnect = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id());
        let bs_reconnect = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
@@ -1432,8 +1432,8 @@ fn monitor_failed_no_reestablish_response() {
        nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
        nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
 
-       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
-       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
+       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
+       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
 
        let as_reconnect = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id());
        let bs_reconnect = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
@@ -2012,9 +2012,9 @@ fn test_pending_update_fee_ack_on_reconnect() {
        nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
        nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
 
-       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::known() });
+       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::known(), remote_network_address: None });
        let as_connect_msg = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id());
-       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::known() });
+       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::known(), remote_network_address: None });
        let bs_connect_msg = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
 
        nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_connect_msg);
@@ -2058,6 +2058,50 @@ fn test_pending_update_fee_ack_on_reconnect() {
        claim_payment(&nodes[1], &[&nodes[0]], payment_preimage);
 }
 
+#[test]
+fn test_fail_htlc_on_broadcast_after_claim() {
+       // In an earlier version of 7e78fa660cec8a73286c94c1073ee588140e7a01 we'd also fail the inbound
+       // channel backwards if we received an HTLC failure after an HTLC fulfillment. Here we test a
+       // specific case of that by having the HTLC failure come from the ChannelMonitor after a dust
+       // HTLC was not included in a confirmed commitment transaction.
+       //
+       // We first forward a payment, then claim it with an update_fulfill_htlc message, closing the
+       // channel immediately before commitment occurs. After the commitment transaction reaches
+       // ANTI_REORG_DELAY confirmations, we will try to fail the HTLC which was already fulfilled.
+       let chanmon_cfgs = create_chanmon_cfgs(3);
+       let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+       let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+       let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+
+       create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
+       let chan_id_2 = create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known()).2;
+
+       let payment_preimage = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 2000).0;
+
+       let bs_txn = get_local_commitment_txn!(nodes[2], chan_id_2);
+       assert_eq!(bs_txn.len(), 1);
+
+       nodes[2].node.claim_funds(payment_preimage);
+       check_added_monitors!(nodes[2], 1);
+       let cs_updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
+       nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &cs_updates.update_fulfill_htlcs[0]);
+       let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+       check_added_monitors!(nodes[1], 1);
+       expect_payment_forwarded!(nodes[1], Some(1000), false);
+
+       mine_transaction(&nodes[1], &bs_txn[0]);
+       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
+       check_closed_broadcast!(nodes[1], true);
+       connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
+       check_added_monitors!(nodes[1], 1);
+       expect_pending_htlcs_forwardable!(nodes[1]);
+
+       nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fulfill_htlcs[0]);
+       expect_payment_sent_without_paths!(nodes[0], payment_preimage);
+       commitment_signed_dance!(nodes[0], nodes[1], bs_updates.commitment_signed, true, true);
+       expect_payment_path_successful!(nodes[0]);
+}
+
 fn do_update_fee_resend_test(deliver_update: bool, parallel_updates: bool) {
        // In early versions we did not handle resending of update_fee on reconnect correctly. The
        // chanmon_consistency fuzz target, of course, immediately found it, but we test a few cases
@@ -2094,9 +2138,9 @@ fn do_update_fee_resend_test(deliver_update: bool, parallel_updates: bool) {
        nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
        nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
 
-       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::known() });
+       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::known(), remote_network_address: None });
        let as_connect_msg = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id());
-       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::known() });
+       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::known(), remote_network_address: None });
        let bs_connect_msg = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
 
        nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_connect_msg);
@@ -2258,10 +2302,10 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) {
                nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
 
                // Now reconnect the two
-               nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
+               nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
                let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
                assert_eq!(reestablish_1.len(), 1);
-               nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
+               nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
                let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
                assert_eq!(reestablish_2.len(), 1);
 
index 0792164e20d588a043de6700e0c23ca3956eff9d..679ff82e0181d75e8331ccd66bad1ff588cd8f1c 100644 (file)
@@ -4782,7 +4782,7 @@ impl<Signer: Sign> Channel<Signer> {
        /// should be sent back to the counterparty node.
        ///
        /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
-       pub fn accept_inbound_channel(&mut self) -> msgs::AcceptChannel {
+       pub fn accept_inbound_channel(&mut self, user_id: u64) -> msgs::AcceptChannel {
                if self.is_outbound() {
                        panic!("Tried to send accept_channel for an outbound channel?");
                }
@@ -4796,6 +4796,7 @@ impl<Signer: Sign> Channel<Signer> {
                        panic!("The inbound channel has already been accepted");
                }
 
+               self.user_id = user_id;
                self.inbound_awaiting_accept = false;
 
                self.generate_accept_channel_message()
@@ -6457,7 +6458,7 @@ mod tests {
                let mut node_b_chan = Channel::<EnforcingSigner>::new_from_req(&&feeest, &&keys_provider, node_b_node_id, &InitFeatures::known(), &open_channel_msg, 7, &config, 0, &&logger, 42).unwrap();
 
                // Node B --> Node A: accept channel, explicitly setting B's dust limit.
-               let mut accept_channel_msg = node_b_chan.accept_inbound_channel();
+               let mut accept_channel_msg = node_b_chan.accept_inbound_channel(0);
                accept_channel_msg.dust_limit_satoshis = 546;
                node_a_chan.accept_channel(&accept_channel_msg, &config.peer_channel_config_limits, &InitFeatures::known()).unwrap();
                node_a_chan.holder_dust_limit_satoshis = 1560;
@@ -6575,7 +6576,7 @@ mod tests {
                let mut node_b_chan = Channel::<EnforcingSigner>::new_from_req(&&feeest, &&keys_provider, node_b_node_id, &InitFeatures::known(), &open_channel_msg, 7, &config, 0, &&logger, 42).unwrap();
 
                // Node B --> Node A: accept channel
-               let accept_channel_msg = node_b_chan.accept_inbound_channel();
+               let accept_channel_msg = node_b_chan.accept_inbound_channel(0);
                node_a_chan.accept_channel(&accept_channel_msg, &config.peer_channel_config_limits, &InitFeatures::known()).unwrap();
 
                // Node A --> Node B: funding created
@@ -6665,6 +6666,7 @@ mod tests {
                }
        }
 
+       #[cfg(not(feature = "grind_signatures"))]
        #[test]
        fn outbound_commitment_test() {
                // Test vectors from BOLT 3 Appendices C and F (anchors):
index 81789313d290780ca254d910cd26d985a57a06ae..cfa2da06fd17bddb9167f14ec9b2efed6a51d7ff 100644 (file)
@@ -69,6 +69,7 @@ use core::ops::Deref;
 
 #[cfg(any(test, feature = "std"))]
 use std::time::Instant;
+use util::crypto::sign;
 
 mod inbound_payment {
        use alloc::string::ToString;
@@ -441,6 +442,7 @@ struct ClaimableHTLC {
        cltv_expiry: u32,
        value: u64,
        onion_payload: OnionPayload,
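+       /// The number of [`ChannelManager::timer_tick_occurred`] ticks this HTLC has spent waiting for
+       /// the remaining parts of its MPP payment; used with [`MPP_TIMEOUT_TICKS`] to fail back stuck
+       /// partial payments.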
+       timer_ticks: u8,
 }
 
 /// A payment identifier used to uniquely identify a payment to LDK.
@@ -886,6 +888,8 @@ impl PendingOutboundPayment {
 /// issues such as overly long function definitions. Note that the ChannelManager can take any
 /// type that implements KeysInterface for its keys manager, but this type alias chooses the
 /// concrete type of the KeysManager.
+///
+/// (C-not exported) as Arcs don't make sense in bindings
 pub type SimpleArcChannelManager<M, T, F, L> = ChannelManager<InMemorySigner, Arc<M>, Arc<T>, Arc<KeysManager>, Arc<F>, Arc<L>>;
 
 /// SimpleRefChannelManager is a type alias for a ChannelManager reference, and is the reference
@@ -896,6 +900,8 @@ pub type SimpleArcChannelManager<M, T, F, L> = ChannelManager<InMemorySigner, Ar
 /// helps with issues such as long function definitions. Note that the ChannelManager can take any
 /// type that implements KeysInterface for its keys manager, but this type alias chooses the
 /// concrete type of the KeysManager.
+///
+/// (C-not exported) as Arcs don't make sense in bindings
 pub type SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, M, T, F, L> = ChannelManager<InMemorySigner, &'a M, &'b T, &'c KeysManager, &'d F, &'e L>;
 
 /// Manager which keeps track of a number of channels and sends messages to the appropriate
@@ -1148,6 +1154,9 @@ const CHECK_CLTV_EXPIRY_SANITY_2: u32 = MIN_CLTV_EXPIRY_DELTA as u32 - LATENCY_G
 /// pending HTLCs in flight.
 pub(crate) const PAYMENT_EXPIRY_BLOCKS: u32 = 3;
 
+/// The number of ticks of [`ChannelManager::timer_tick_occurred`] until expiry of incomplete MPPs
+pub(crate) const MPP_TIMEOUT_TICKS: u8 = 3;
+
 /// Information needed for constructing an invoice route hint for this channel.
 #[derive(Clone, Debug, PartialEq)]
 pub struct CounterpartyForwardingInfo {
@@ -3074,7 +3083,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                        excess_data: Vec::new(),
                };
                let msghash = hash_to_message!(&Sha256dHash::hash(&announcement.encode()[..])[..]);
-               let node_announce_sig = self.secp_ctx.sign(&msghash, &self.our_network_key);
+               let node_announce_sig = sign(&self.secp_ctx, &msghash, &self.our_network_key);
 
                let mut channel_state_lock = self.channel_state.lock().unwrap();
                let channel_state = &mut *channel_state_lock;
@@ -3348,6 +3357,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                                                phantom_shared_secret,
                                                                        },
                                                                        value: amt_to_forward,
+                                                                       timer_ticks: 0,
                                                                        cltv_expiry,
                                                                        onion_payload,
                                                                };
@@ -3642,6 +3652,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                        let new_feerate = self.fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Normal);
 
                        let mut handle_errors = Vec::new();
+                       let mut timed_out_mpp_htlcs = Vec::new();
                        {
                                let mut channel_state_lock = self.channel_state.lock().unwrap();
                                let channel_state = &mut *channel_state_lock;
@@ -3690,6 +3701,32 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
 
                                        true
                                });
+
+                               channel_state.claimable_htlcs.retain(|payment_hash, htlcs| {
+                                       if htlcs.is_empty() {
+                                               // This should be unreachable
+                                               debug_assert!(false);
+                                               return false;
+                                       }
+                                       if let OnionPayload::Invoice(ref final_hop_data) = htlcs[0].onion_payload {
+                                               // Check if we've received all the parts we need for an MPP (the value of the parts adds to total_msat).
+                                               // In this case we're not going to handle any timeouts of the parts here.
+                                               if final_hop_data.total_msat == htlcs.iter().fold(0, |total, htlc| total + htlc.value) {
+                                                       return true;
+                                               } else if htlcs.into_iter().any(|htlc| {
+                                                       htlc.timer_ticks += 1;
+                                                       return htlc.timer_ticks >= MPP_TIMEOUT_TICKS
+                                               }) {
+                                                       timed_out_mpp_htlcs.extend(htlcs.into_iter().map(|htlc| (htlc.prev_hop.clone(), payment_hash.clone())));
+                                                       return false;
+                                               }
+                                       }
+                                       true
+                               });
+                       }
+
+                       for htlc_source in timed_out_mpp_htlcs.drain(..) {
+                               self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), HTLCSource::PreviousHopData(htlc_source.0), &htlc_source.1, HTLCFailReason::Reason { failure_code: 23, data: Vec::new() });
                        }
 
                        for (err, counterparty_node_id) in handle_errors.drain(..) {
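The retain pass above is driven entirely by `ChannelManager::timer_tick_occurred`: each tick bumps `timer_ticks` on the parts of a still-incomplete MPP, and once any part reaches `MPP_TIMEOUT_TICKS` all received parts are failed back to the previous hop with failure code 23. A minimal sketch of the receiver side, mirroring the new `mpp_receive_timeout` test added later in this diff (the `node` handle and the timer cadence are the application's responsibility):

    // Drive the receiver's timer; after MPP_TIMEOUT_TICKS ticks without the
    // remaining MPP parts arriving, the parts received so far are failed back.
    for _ in 0..MPP_TIMEOUT_TICKS {
        node.timer_tick_occurred();
    }
    // The resulting update_fail_htlc messages surface on the next
    // forwardable-HTLCs pass (expect_pending_htlcs_forwardable! in the test).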
@@ -4316,8 +4353,13 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
        ///
        /// The `temporary_channel_id` parameter indicates which inbound channel should be accepted.
        ///
-       /// [`Event::OpenChannelRequest`]: crate::util::events::Event::OpenChannelRequest
-       pub fn accept_inbound_channel(&self, temporary_channel_id: &[u8; 32]) -> Result<(), APIError> {
+       /// For inbound channels, the `user_channel_id` parameter will be provided back in
+       /// [`Event::ChannelClosed::user_channel_id`] to allow tracking of which events correspond
+       /// with which `accept_inbound_channel` call.
+       ///
+       /// [`Event::OpenChannelRequest`]: events::Event::OpenChannelRequest
+       /// [`Event::ChannelClosed::user_channel_id`]: events::Event::ChannelClosed::user_channel_id
+       pub fn accept_inbound_channel(&self, temporary_channel_id: &[u8; 32], user_channel_id: u64) -> Result<(), APIError> {
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
 
                let mut channel_state_lock = self.channel_state.lock().unwrap();
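A hedged sketch of the manual-acceptance flow this parameter enables, assuming `manually_accept_inbound_channels` is set in the node's config and `my_id` is an application-chosen `u64` (both names are illustrative, mirroring the updated test_manually_accept_inbound_channel_request test later in this diff):

    match event {
        Event::OpenChannelRequest { temporary_channel_id, .. } => {
            // The u64 passed here is echoed back in ChannelClosed::user_channel_id.
            channel_manager.accept_inbound_channel(&temporary_channel_id, my_id).unwrap();
        },
        Event::ChannelClosed { user_channel_id, .. } => {
            // For channels accepted above, this matches the value passed in.
            assert_eq!(user_channel_id, my_id);
        },
        _ => {},
    }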
@@ -4329,7 +4371,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                }
                                channel_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel {
                                        node_id: channel.get().get_counterparty_node_id(),
-                                       msg: channel.get_mut().accept_inbound_channel(),
+                                       msg: channel.get_mut().accept_inbound_channel(user_channel_id),
                                });
                        }
                        hash_map::Entry::Vacant(_) => {
@@ -4370,7 +4412,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                if !self.default_configuration.manually_accept_inbound_channels {
                                        channel_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel {
                                                node_id: counterparty_node_id.clone(),
-                                               msg: channel.accept_inbound_channel(),
+                                               msg: channel.accept_inbound_channel(0),
                                        });
                                } else {
                                        let mut pending_events = self.pending_events.lock().unwrap();
@@ -5555,6 +5597,12 @@ where
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
                self.do_chain_event(Some(height), |channel| channel.transactions_confirmed(&block_hash, height, txdata, self.genesis_hash.clone(), self.get_our_node_id(), &self.logger)
                        .map(|(a, b)| (a, Vec::new(), b)));
+
+               let last_best_block_height = self.best_block.read().unwrap().height();
+               if height < last_best_block_height {
+                       let timestamp = self.highest_seen_timestamp.load(Ordering::Acquire);
+                       self.do_chain_event(Some(last_best_block_height), |channel| channel.best_block_updated(last_best_block_height, timestamp as u32, self.genesis_hash.clone(), self.get_our_node_id(), &self.logger));
+               }
        }
 
        fn best_block_updated(&self, header: &BlockHeader, height: u32) {
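The block above makes `transactions_confirmed` re-run the per-channel `best_block_updated` at the already-known tip whenever it is called with an older confirmation height. A hedged sketch of the client-side call order this covers during a resync (header, height and transaction names are illustrative; see the new `test_funding_locked_without_best_block_updated` test later in this diff):

    // After reconnecting, a client may learn the new chain tip first...
    channel_manager.best_block_updated(&tip_header, tip_height);
    // ...and only afterwards replay the funding transaction at its (older)
    // confirmation height. Previously funding_locked waited for yet another
    // best_block_updated call; now it is generated by this call directly.
    channel_manager.transactions_confirmed(&conf_header, &[(0, &funding_tx)], conf_height);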
@@ -5944,6 +5992,7 @@ impl<Signer: Sign, M: Deref , T: Deref , K: Deref , F: Deref , L: Deref >
                                        &events::MessageSendEvent::SendChannelRangeQuery { .. } => false,
                                        &events::MessageSendEvent::SendShortIdsQuery { .. } => false,
                                        &events::MessageSendEvent::SendReplyChannelRange { .. } => false,
+                                       &events::MessageSendEvent::SendGossipTimestampFilter { .. } => false,
                                }
                        });
                }
@@ -6302,6 +6351,7 @@ impl Readable for ClaimableHTLC {
                };
                Ok(Self {
                        prev_hop: prev_hop.0.unwrap(),
+                       timer_ticks: 0,
                        value,
                        onion_payload,
                        cltv_expiry,
@@ -7376,8 +7426,8 @@ mod tests {
 
                let payer_pubkey = nodes[0].node.get_our_node_id();
                let payee_pubkey = nodes[1].node.get_our_node_id();
-               nodes[0].node.peer_connected(&payee_pubkey, &msgs::Init { features: InitFeatures::known() });
-               nodes[1].node.peer_connected(&payer_pubkey, &msgs::Init { features: InitFeatures::known() });
+               nodes[0].node.peer_connected(&payee_pubkey, &msgs::Init { features: InitFeatures::known(), remote_network_address: None });
+               nodes[1].node.peer_connected(&payer_pubkey, &msgs::Init { features: InitFeatures::known(), remote_network_address: None });
 
                let _chan = create_chan_between_nodes(&nodes[0], &nodes[1], InitFeatures::known(), InitFeatures::known());
                let route_params = RouteParameters {
@@ -7420,8 +7470,8 @@ mod tests {
 
                let payer_pubkey = nodes[0].node.get_our_node_id();
                let payee_pubkey = nodes[1].node.get_our_node_id();
-               nodes[0].node.peer_connected(&payee_pubkey, &msgs::Init { features: InitFeatures::known() });
-               nodes[1].node.peer_connected(&payer_pubkey, &msgs::Init { features: InitFeatures::known() });
+               nodes[0].node.peer_connected(&payee_pubkey, &msgs::Init { features: InitFeatures::known(), remote_network_address: None });
+               nodes[1].node.peer_connected(&payer_pubkey, &msgs::Init { features: InitFeatures::known(), remote_network_address: None });
 
                let _chan = create_chan_between_nodes(&nodes[0], &nodes[1], InitFeatures::known(), InitFeatures::known());
                let route_params = RouteParameters {
@@ -7586,8 +7636,8 @@ pub mod bench {
                });
                let node_b_holder = NodeHolder { node: &node_b };
 
-               node_a.peer_connected(&node_b.get_our_node_id(), &Init { features: InitFeatures::known() });
-               node_b.peer_connected(&node_a.get_our_node_id(), &Init { features: InitFeatures::known() });
+               node_a.peer_connected(&node_b.get_our_node_id(), &Init { features: InitFeatures::known(), remote_network_address: None });
+               node_b.peer_connected(&node_a.get_our_node_id(), &Init { features: InitFeatures::known(), remote_network_address: None });
                node_a.create_channel(node_b.get_our_node_id(), 8_000_000, 100_000_000, 42, None).unwrap();
                node_b.handle_open_channel(&node_a.get_our_node_id(), InitFeatures::known(), &get_event_msg!(node_a_holder, MessageSendEvent::SendOpenChannel, node_b.get_our_node_id()));
                node_a.handle_accept_channel(&node_b.get_our_node_id(), InitFeatures::known(), &get_event_msg!(node_b_holder, MessageSendEvent::SendAcceptChannel, node_a.get_our_node_id()));
index d65bf2ee3445303d60e52c7f9bcfa7e23f7c92fd..aa6fdc97b4a897e1034fc3524880c9aa46b1ed44 100644 (file)
@@ -1897,8 +1897,8 @@ pub fn create_network<'a, 'b: 'a, 'c: 'b>(node_count: usize, cfgs: &'b Vec<NodeC
 
        for i in 0..node_count {
                for j in (i+1)..node_count {
-                       nodes[i].node.peer_connected(&nodes[j].node.get_our_node_id(), &msgs::Init { features: cfgs[j].features.clone() });
-                       nodes[j].node.peer_connected(&nodes[i].node.get_our_node_id(), &msgs::Init { features: cfgs[i].features.clone() });
+                       nodes[i].node.peer_connected(&nodes[j].node.get_our_node_id(), &msgs::Init { features: cfgs[j].features.clone(), remote_network_address: None });
+                       nodes[j].node.peer_connected(&nodes[i].node.get_our_node_id(), &msgs::Init { features: cfgs[i].features.clone(), remote_network_address: None });
                }
        }
 
@@ -2157,9 +2157,9 @@ macro_rules! handle_chan_reestablish_msgs {
 /// pending_htlc_adds includes both the holding cell and in-flight update_add_htlcs, whereas
 /// for claims/fails they are separated out.
 pub fn reconnect_nodes<'a, 'b, 'c>(node_a: &Node<'a, 'b, 'c>, node_b: &Node<'a, 'b, 'c>, send_funding_locked: (bool, bool), pending_htlc_adds: (i64, i64), pending_htlc_claims: (usize, usize), pending_htlc_fails: (usize, usize), pending_cell_htlc_claims: (usize, usize), pending_cell_htlc_fails: (usize, usize), pending_raa: (bool, bool))  {
-       node_a.node.peer_connected(&node_b.node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
+       node_a.node.peer_connected(&node_b.node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
        let reestablish_1 = get_chan_reestablish_msgs!(node_a, node_b);
-       node_b.node.peer_connected(&node_a.node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
+       node_b.node.peer_connected(&node_a.node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
        let reestablish_2 = get_chan_reestablish_msgs!(node_b, node_a);
 
        if send_funding_locked.0 {
index 1f64ef0a920efc27a644fb69ee5aacb7e87e45fe..525b4f33c438f0c01fda291ae90fbdab4f2c7867 100644 (file)
@@ -3779,9 +3779,9 @@ fn test_funding_peer_disconnect() {
        let events_2 = nodes[1].node.get_and_clear_pending_msg_events();
        assert!(events_2.is_empty());
 
-       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
+       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
        let as_reestablish = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id());
-       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
+       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
        let bs_reestablish = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
 
        // nodes[0] hasn't yet received a funding_locked, so it only sends that on reconnect.
@@ -3940,6 +3940,32 @@ fn test_funding_peer_disconnect() {
        assert!(found_announcement);
 }
 
+#[test]
+fn test_funding_locked_without_best_block_updated() {
+       // Previously, if we were offline when a funding transaction was locked in, and we then came
+       // back online and replayed the chain as a single best_block_updated call followed by
+       // transactions_confirmed, we would not generate a funding_locked until some later
+       // best_block_updated. This tests that we now generate the funding_locked immediately.
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+       let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+       let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+       *nodes[0].connect_style.borrow_mut() = ConnectStyle::BestBlockFirstSkippingBlocks;
+
+       let funding_tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 1_000_000, 0, InitFeatures::known(), InitFeatures::known());
+
+       let conf_height = nodes[0].best_block_info().1 + 1;
+       connect_blocks(&nodes[0], CHAN_CONFIRM_DEPTH);
+       let block_txn = [funding_tx];
+       let conf_txn: Vec<_> = block_txn.iter().enumerate().collect();
+       let conf_block_header = nodes[0].get_block_header(conf_height);
+       nodes[0].node.transactions_confirmed(&conf_block_header, &conf_txn[..], conf_height);
+
+       // Ensure nodes[0] generates a funding_locked after the transactions_confirmed
+       let as_funding_locked = get_event_msg!(nodes[0], MessageSendEvent::SendFundingLocked, nodes[1].node.get_our_node_id());
+       nodes[1].node.handle_funding_locked(&nodes[0].node.get_our_node_id(), &as_funding_locked);
+}
+
 #[test]
 fn test_drop_messages_peer_disconnect_dual_htlc() {
        // Test that we can handle reconnecting when both sides of a channel have pending
@@ -4000,10 +4026,10 @@ fn test_drop_messages_peer_disconnect_dual_htlc() {
        nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
        nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
 
-       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
+       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
        let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
        assert_eq!(reestablish_1.len(), 1);
-       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
+       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
        let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
        assert_eq!(reestablish_2.len(), 1);
 
@@ -4279,9 +4305,9 @@ fn test_no_txn_manager_serialize_deserialize() {
        assert_eq!(nodes[0].node.list_channels().len(), 1);
        check_added_monitors!(nodes[0], 1);
 
-       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
+       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
        let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
-       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
+       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
        let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
 
        nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
@@ -4399,9 +4425,9 @@ fn test_manager_serialize_deserialize_events() {
        assert_eq!(nodes[0].node.list_channels().len(), 1);
        check_added_monitors!(nodes[0], 1);
 
-       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
+       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
        let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
-       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
+       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
        let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
 
        nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
@@ -4594,9 +4620,9 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() {
        //... and we can even still claim the payment!
        claim_payment(&nodes[2], &[&nodes[0], &nodes[1]], our_payment_preimage);
 
-       nodes[3].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
+       nodes[3].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
        let reestablish = get_event_msg!(nodes[3], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
-       nodes[0].node.peer_connected(&nodes[3].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
+       nodes[0].node.peer_connected(&nodes[3].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
        nodes[0].node.handle_channel_reestablish(&nodes[3].node.get_our_node_id(), &reestablish);
        let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
        assert_eq!(msg_events.len(), 1);
@@ -6411,7 +6437,8 @@ fn test_update_add_htlc_bolt2_sender_cltv_expiry_too_high() {
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
        let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 0, InitFeatures::known(), InitFeatures::known());
 
-       let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], vec![], 100000000, 500000001);
+       let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], vec![], 100000000, 0);
+       route.paths[0].last_mut().unwrap().cltv_expiry_delta = 500000001;
        unwrap_send_err!(nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)), true, APIError::RouteError { ref err },
                assert_eq!(err, &"Channel CLTV overflowed?"));
 }
@@ -6654,10 +6681,10 @@ fn test_update_add_htlc_bolt2_receiver_check_repeated_id_ignore() {
        //Disconnect and Reconnect
        nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
        nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
-       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
+       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
        let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
        assert_eq!(reestablish_1.len(), 1);
-       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
+       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
        let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
        assert_eq!(reestablish_2.len(), 1);
        nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]);
@@ -7302,8 +7329,8 @@ fn test_data_loss_protect() {
 
        check_added_monitors!(nodes[0], 1);
 
-       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
-       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
+       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
+       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
 
        let reestablish_0 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
 
@@ -7451,10 +7478,10 @@ fn test_announce_disable_channels() {
                }
        }
        // Reconnect peers
-       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
+       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
        let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
        assert_eq!(reestablish_1.len(), 3);
-       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
+       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
        let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
        assert_eq!(reestablish_2.len(), 3);
 
@@ -8083,7 +8110,7 @@ fn test_manually_accept_inbound_channel_request() {
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(manually_accept_conf.clone())]);
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
-       nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, Some(manually_accept_conf)).unwrap();
+       let temp_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, Some(manually_accept_conf)).unwrap();
        let res = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
 
        nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), InitFeatures::known(), &res);
@@ -8095,7 +8122,7 @@ fn test_manually_accept_inbound_channel_request() {
        let events = nodes[1].node.get_and_clear_pending_events();
        match events[0] {
                Event::OpenChannelRequest { temporary_channel_id, .. } => {
-                       nodes[1].node.accept_inbound_channel(&temporary_channel_id).unwrap();
+                       nodes[1].node.accept_inbound_channel(&temporary_channel_id, 23).unwrap();
                }
                _ => panic!("Unexpected event"),
        }
@@ -8109,6 +8136,19 @@ fn test_manually_accept_inbound_channel_request() {
                }
                _ => panic!("Unexpected event"),
        }
+
+       nodes[1].node.force_close_channel(&temp_channel_id).unwrap();
+
+       let close_msg_ev = nodes[1].node.get_and_clear_pending_msg_events();
+       assert_eq!(close_msg_ev.len(), 1);
+
+       let events = nodes[1].node.get_and_clear_pending_events();
+       match events[0] {
+               Event::ChannelClosed { user_channel_id, .. } => {
+                       assert_eq!(user_channel_id, 23);
+               }
+               _ => panic!("Unexpected event"),
+       }
 }
 
 #[test]
@@ -8232,8 +8272,8 @@ fn test_can_not_accept_inbound_channel_twice() {
        let events = nodes[1].node.get_and_clear_pending_events();
        match events[0] {
                Event::OpenChannelRequest { temporary_channel_id, .. } => {
-                       nodes[1].node.accept_inbound_channel(&temporary_channel_id).unwrap();
-                       let api_res = nodes[1].node.accept_inbound_channel(&temporary_channel_id);
+                       nodes[1].node.accept_inbound_channel(&temporary_channel_id, 0).unwrap();
+                       let api_res = nodes[1].node.accept_inbound_channel(&temporary_channel_id, 0);
                        match api_res {
                                Err(APIError::APIMisuseError { err }) => {
                                        assert_eq!(err, "The channel isn't currently awaiting to be accepted.");
@@ -8265,7 +8305,7 @@ fn test_can_not_accept_unknown_inbound_channel() {
        let node = create_network(1, &node_cfg, &node_chanmgr)[0].node;
 
        let unknown_channel_id = [0; 32];
-       let api_res = node.accept_inbound_channel(&unknown_channel_id);
+       let api_res = node.accept_inbound_channel(&unknown_channel_id, 0);
        match api_res {
                Err(APIError::ChannelUnavailable { err }) => {
                        assert_eq!(err, "Can't accept a channel that doesn't exist");
@@ -9487,8 +9527,8 @@ fn test_keysend_payments_to_private_node() {
 
        let payer_pubkey = nodes[0].node.get_our_node_id();
        let payee_pubkey = nodes[1].node.get_our_node_id();
-       nodes[0].node.peer_connected(&payee_pubkey, &msgs::Init { features: InitFeatures::known() });
-       nodes[1].node.peer_connected(&payer_pubkey, &msgs::Init { features: InitFeatures::known() });
+       nodes[0].node.peer_connected(&payee_pubkey, &msgs::Init { features: InitFeatures::known(), remote_network_address: None });
+       nodes[1].node.peer_connected(&payer_pubkey, &msgs::Init { features: InitFeatures::known(), remote_network_address: None });
 
        let _chan = create_chan_between_nodes(&nodes[0], &nodes[1], InitFeatures::known(), InitFeatures::known());
        let route_params = RouteParameters {
index fab396e3c7112af934021041abfcf2e1f4d609af..ef07d4c918f8597f28a6b3df90aeaded15b06bb9 100644 (file)
@@ -75,6 +75,11 @@ pub enum DecodeError {
 pub struct Init {
        /// The relevant features which the sender supports
        pub features: InitFeatures,
+       /// The recipient's network address. This lets a node report the remote IP address it
+       /// observes back to the connecting peer in the init message. The peer can use that
+       /// information to detect a change to its public IPv4 address (e.g. behind NAT) and
+       /// include the new address in a subsequent node_announcement update message.
+       pub remote_network_address: Option<NetAddress>,
 }
 
 /// An error message to be sent or received from a peer
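A hedged sketch of how an application might consume the reported address; nothing in this diff acts on it automatically, and `maybe_discovered_public_addr` and `my_announced_addrs` are hypothetical application-side names:

    fn maybe_discovered_public_addr(init: &msgs::Init, my_announced_addrs: &[NetAddress]) -> Option<NetAddress> {
        // If a peer reports an address we do not currently announce, the application
        // could include it in a future broadcast_node_announcement call.
        match &init.remote_network_address {
            Some(addr) if !my_announced_addrs.contains(addr) => Some(addr.clone()),
            _ => None,
        }
    }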
@@ -886,7 +891,7 @@ pub trait RoutingMessageHandler : MessageSendEventsProvider {
        /// Called when a connection is established with a peer. This can be used to
        /// perform routing table synchronization using a strategy defined by the
        /// implementor.
-       fn sync_routing_table(&self, their_node_id: &PublicKey, init: &Init);
+       fn peer_connected(&self, their_node_id: &PublicKey, init: &Init);
        /// Handles the reply of a query we initiated to learn about channels
        /// for a given range of blocks. We can expect to receive one or more
        /// replies to a single query.
@@ -1167,7 +1172,11 @@ impl Writeable for Init {
                // global_features gets the bottom 13 bits of our features, and local_features gets all of
                // our relevant feature bits. This keeps us compatible with old nodes.
                self.features.write_up_to_13(w)?;
-               self.features.write(w)
+               self.features.write(w)?;
+               encode_tlv_stream!(w, {
+                       (3, self.remote_network_address, option)
+               });
+               Ok(())
        }
 }
 
@@ -1175,8 +1184,13 @@ impl Readable for Init {
        fn read<R: Read>(r: &mut R) -> Result<Self, DecodeError> {
                let global_features: InitFeatures = Readable::read(r)?;
                let features: InitFeatures = Readable::read(r)?;
+               let mut remote_network_address: Option<NetAddress> = None;
+               decode_tlv_stream!(r, {
+                       (3, remote_network_address, option)
+               });
                Ok(Init {
                        features: features.or(global_features),
+                       remote_network_address,
                })
        }
 }
@@ -2447,13 +2461,27 @@ mod tests {
        fn encoding_init() {
                assert_eq!(msgs::Init {
                        features: InitFeatures::from_le_bytes(vec![0xFF, 0xFF, 0xFF]),
+                       remote_network_address: None,
                }.encode(), hex::decode("00023fff0003ffffff").unwrap());
                assert_eq!(msgs::Init {
                        features: InitFeatures::from_le_bytes(vec![0xFF]),
+                       remote_network_address: None,
                }.encode(), hex::decode("0001ff0001ff").unwrap());
                assert_eq!(msgs::Init {
                        features: InitFeatures::from_le_bytes(vec![]),
+                       remote_network_address: None,
                }.encode(), hex::decode("00000000").unwrap());
+
+               let init_msg = msgs::Init { features: InitFeatures::from_le_bytes(vec![]),
+                       remote_network_address: Some(msgs::NetAddress::IPv4 {
+                               addr: [127, 0, 0, 1],
+                               port: 1000,
+                       }),
+               };
+               let encoded_value = init_msg.encode();
+               let target_value = hex::decode("000000000307017f00000103e8").unwrap();
+               assert_eq!(encoded_value, target_value);
+               assert_eq!(msgs::Init::read(&mut Cursor::new(&target_value)).unwrap(), init_msg);
        }
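For reference, the new target vector decomposes as follows (a hedged reading against the BOLT #1 wire format; type 3 is odd, so peers that do not understand the TLV simply ignore it):

    // 0000              gflen = 0 (no global features)
    // 0000              flen  = 0 (no features)
    // 03                TLV type 3: remote_network_address
    // 07                TLV length: 7 bytes
    // 01 7f000001 03e8  descriptor 1 (IPv4), address 127.0.0.1, port 1000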
 
        #[test]
index bae9a2914fa8014a87b8a97015bc010d5580a366..346fb98b41eda3d895f9887575764db0ecc44503 100644 (file)
@@ -15,7 +15,7 @@ use chain::{ChannelMonitorUpdateErr, Confirm, Listen, Watch};
 use chain::channelmonitor::{ANTI_REORG_DELAY, ChannelMonitor, LATENCY_GRACE_PERIOD_BLOCKS};
 use chain::transaction::OutPoint;
 use chain::keysinterface::KeysInterface;
-use ln::channelmanager::{BREAKDOWN_TIMEOUT, ChannelManager, ChannelManagerReadArgs, PaymentId, PaymentSendFailure};
+use ln::channelmanager::{BREAKDOWN_TIMEOUT, ChannelManager, ChannelManagerReadArgs, MPP_TIMEOUT_TICKS, PaymentId, PaymentSendFailure};
 use ln::features::{InitFeatures, InvoiceFeatures};
 use ln::msgs;
 use ln::msgs::ChannelMessageHandler;
@@ -199,6 +199,78 @@ fn mpp_retry() {
        claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, payment_preimage);
 }
 
+fn do_mpp_receive_timeout(send_partial_mpp: bool) {
+       let chanmon_cfgs = create_chanmon_cfgs(4);
+       let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
+       let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
+       let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
+
+       let chan_1_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
+       let chan_2_id = create_announced_chan_between_nodes(&nodes, 0, 2, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
+       let chan_3_id = create_announced_chan_between_nodes(&nodes, 1, 3, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
+       let chan_4_id = create_announced_chan_between_nodes(&nodes, 2, 3, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
+
+       let (mut route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[3], 100_000);
+       let path = route.paths[0].clone();
+       route.paths.push(path);
+       route.paths[0][0].pubkey = nodes[1].node.get_our_node_id();
+       route.paths[0][0].short_channel_id = chan_1_id;
+       route.paths[0][1].short_channel_id = chan_3_id;
+       route.paths[1][0].pubkey = nodes[2].node.get_our_node_id();
+       route.paths[1][0].short_channel_id = chan_2_id;
+       route.paths[1][1].short_channel_id = chan_4_id;
+
+       // Initiate the MPP payment.
+       let _ = nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret)).unwrap();
+       check_added_monitors!(nodes[0], 2); // one monitor per path
+       let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+       assert_eq!(events.len(), 2);
+
+       // Pass half of the payment along the first path.
+       pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 200_000, payment_hash, Some(payment_secret), events.remove(0), false, None);
+
+       if send_partial_mpp {
+               // Time out the partial MPP
+               for _ in 0..MPP_TIMEOUT_TICKS {
+                       nodes[3].node.timer_tick_occurred();
+               }
+
+               // Failed HTLC from node 3 -> 1
+               expect_pending_htlcs_forwardable!(nodes[3]);
+               let htlc_fail_updates_3_1 = get_htlc_update_msgs!(nodes[3], nodes[1].node.get_our_node_id());
+               assert_eq!(htlc_fail_updates_3_1.update_fail_htlcs.len(), 1);
+               nodes[1].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &htlc_fail_updates_3_1.update_fail_htlcs[0]);
+               check_added_monitors!(nodes[3], 1);
+               commitment_signed_dance!(nodes[1], nodes[3], htlc_fail_updates_3_1.commitment_signed, false);
+
+               // Failed HTLC from node 1 -> 0
+               expect_pending_htlcs_forwardable!(nodes[1]);
+               let htlc_fail_updates_1_0 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+               assert_eq!(htlc_fail_updates_1_0.update_fail_htlcs.len(), 1);
+               nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &htlc_fail_updates_1_0.update_fail_htlcs[0]);
+               check_added_monitors!(nodes[1], 1);
+               commitment_signed_dance!(nodes[0], nodes[1], htlc_fail_updates_1_0.commitment_signed, false);
+
+               expect_payment_failed_conditions!(nodes[0], payment_hash, false, PaymentFailedConditions::new().mpp_parts_remain().expected_htlc_error_data(23, &[][..]));
+       } else {
+               // Pass half of the payment along the second path.
+               pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], 200_000, payment_hash, Some(payment_secret), events.remove(0), true, None);
+
+               // Even after MPP_TIMEOUT_TICKS we should not timeout the MPP if we have all the parts
+               for _ in 0..MPP_TIMEOUT_TICKS {
+                       nodes[3].node.timer_tick_occurred();
+               }
+
+               claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, payment_preimage);
+       }
+}
+
+#[test]
+fn mpp_receive_timeout() {
+       do_mpp_receive_timeout(true);
+       do_mpp_receive_timeout(false);
+}
+
 #[test]
 fn retry_expired_payment() {
        let chanmon_cfgs = create_chanmon_cfgs(3);
@@ -377,12 +449,12 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) {
        assert_eq!(as_broadcasted_txn[0], as_commitment_tx);
 
        nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
-       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::known()});
+       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::known(), remote_network_address: None });
        assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
 
        // Now nodes[1] should send a channel reestablish, which nodes[0] will respond to with an
        // error, as the channel has hit the chain.
-       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::known()});
+       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::known(), remote_network_address: None });
        let bs_reestablish = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
        nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish);
        let as_err = nodes[0].node.get_and_clear_pending_msg_events();
index d3ac2ebe9a39bf2e918f58dbf4d012ac0434ae99..d12f8c06eed0409630864359e54c20d3b714930d 100644 (file)
@@ -19,7 +19,7 @@ use bitcoin::secp256k1::key::{SecretKey,PublicKey};
 
 use ln::features::InitFeatures;
 use ln::msgs;
-use ln::msgs::{ChannelMessageHandler, LightningError, RoutingMessageHandler};
+use ln::msgs::{ChannelMessageHandler, LightningError, NetAddress, RoutingMessageHandler};
 use ln::channelmanager::{SimpleArcChannelManager, SimpleRefChannelManager};
 use util::ser::{VecWriter, Writeable, Writer};
 use ln::peer_channel_encryptor::{PeerChannelEncryptor,NextNoiseStep};
@@ -69,7 +69,7 @@ impl RoutingMessageHandler for IgnoringMessageHandler {
        fn get_next_channel_announcements(&self, _starting_point: u64, _batch_amount: u8) ->
                Vec<(msgs::ChannelAnnouncement, Option<msgs::ChannelUpdate>, Option<msgs::ChannelUpdate>)> { Vec::new() }
        fn get_next_node_announcements(&self, _starting_point: Option<&PublicKey>, _batch_amount: u8) -> Vec<msgs::NodeAnnouncement> { Vec::new() }
-       fn sync_routing_table(&self, _their_node_id: &PublicKey, _init: &msgs::Init) {}
+       fn peer_connected(&self, _their_node_id: &PublicKey, _init: &msgs::Init) {}
        fn handle_reply_channel_range(&self, _their_node_id: &PublicKey, _msg: msgs::ReplyChannelRange) -> Result<(), LightningError> { Ok(()) }
        fn handle_reply_short_channel_ids_end(&self, _their_node_id: &PublicKey, _msg: msgs::ReplyShortChannelIdsEnd) -> Result<(), LightningError> { Ok(()) }
        fn handle_query_channel_range(&self, _their_node_id: &PublicKey, _msg: msgs::QueryChannelRange) -> Result<(), LightningError> { Ok(()) }
@@ -324,6 +324,7 @@ struct Peer {
        channel_encryptor: PeerChannelEncryptor,
        their_node_id: Option<PublicKey>,
        their_features: Option<InitFeatures>,
+       their_net_address: Option<NetAddress>,
 
        pending_outbound_buffer: LinkedList<Vec<u8>>,
        pending_outbound_buffer_first_msg_offset: usize,
@@ -376,6 +377,8 @@ struct PeerHolder<Descriptor: SocketDescriptor> {
 /// lifetimes). Other times you can afford a reference, which is more efficient, in which case
 /// SimpleRefPeerManager is the more appropriate type. Defining these type aliases prevents
 /// issues such as overly long function definitions.
+///
+/// (C-not exported) as Arcs don't make sense in bindings
 pub type SimpleArcPeerManager<SD, M, T, F, C, L> = PeerManager<SD, Arc<SimpleArcChannelManager<M, T, F, L>>, Arc<NetGraphMsgHandler<Arc<NetworkGraph>, Arc<C>, Arc<L>>>, Arc<L>, Arc<IgnoringMessageHandler>>;
 
 /// SimpleRefPeerManager is a type alias for a PeerManager reference, and is the reference
@@ -384,6 +387,8 @@ pub type SimpleArcPeerManager<SD, M, T, F, C, L> = PeerManager<SD, Arc<SimpleArc
 /// usage of lightning-net-tokio (since tokio::spawn requires parameters with static lifetimes).
 /// But if this is not necessary, using a reference is more efficient. Defining these type aliases
 /// helps with issues such as long function definitions.
+///
+/// (C-not exported) as Arcs don't make sense in bindings
 pub type SimpleRefPeerManager<'a, 'b, 'c, 'd, 'e, 'f, 'g, 'h, SD, M, T, F, C, L> = PeerManager<SD, SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, M, T, F, L>, &'e NetGraphMsgHandler<&'g NetworkGraph, &'h C, &'f L>, &'f L, IgnoringMessageHandler>;
 
 /// A PeerManager manages a set of peers, described by their [`SocketDescriptor`] and marshalls
@@ -495,6 +500,36 @@ impl core::fmt::Display for OptionalFromDebugger<'_> {
        }
 }
 
+/// Filters out addresses in local or private ranges, returning `None` for them.
+/// https://www.iana.org/assignments/ipv4-address-space/ipv4-address-space.xhtml
+/// https://www.iana.org/assignments/ipv6-address-space/ipv6-address-space.xhtml
+fn filter_addresses(ip_address: Option<NetAddress>) -> Option<NetAddress> {
+       match ip_address{
+               // For IPv4 range 10.0.0.0 - 10.255.255.255 (10/8)
+               Some(NetAddress::IPv4{addr: [10, _, _, _], port: _}) => None,
+               // For IPv4 range 0.0.0.0 - 0.255.255.255 (0/8)
+               Some(NetAddress::IPv4{addr: [0, _, _, _], port: _}) => None,
+               // For IPv4 range 100.64.0.0 - 100.127.255.255 (100.64/10)
+               Some(NetAddress::IPv4{addr: [100, 64..=127, _, _], port: _}) => None,
+               // For IPv4 range 127.0.0.0 - 127.255.255.255 (127/8)
+               Some(NetAddress::IPv4{addr: [127, _, _, _], port: _}) => None,
+               // For IPv4 range 169.254.0.0 - 169.254.255.255 (169.254/16)
+               Some(NetAddress::IPv4{addr: [169, 254, _, _], port: _}) => None,
+               // For IPv4 range 172.16.0.0 - 172.31.255.255 (172.16/12)
+               Some(NetAddress::IPv4{addr: [172, 16..=31, _, _], port: _}) => None,
+               // For IPv4 range 192.168.0.0 - 192.168.255.255 (192.168/16)
+               Some(NetAddress::IPv4{addr: [192, 168, _, _], port: _}) => None,
+               // For IPv4 range 192.88.99.0 - 192.88.99.255  (192.88.99/24)
+               Some(NetAddress::IPv4{addr: [192, 88, 99, _], port: _}) => None,
+               // For IPv6 range 2000:0000:0000:0000:0000:0000:0000:0000 - 3fff:ffff:ffff:ffff:ffff:ffff:ffff:ffff (2000::/3)
+               Some(NetAddress::IPv6{addr: [0x20..=0x3F, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _], port: _}) => ip_address,
+               // For remaining addresses
+               Some(NetAddress::IPv6{addr: _, port: _}) => None,
+               Some(..) => ip_address,
+               None => None,
+       }
+}
+
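A quick usage sketch of the filter (the individual ranges are exercised exhaustively by the new test_filter_addresses test added at the bottom of this file):

    // Publicly routable IPv4 addresses pass through unchanged...
    let public_addr = NetAddress::IPv4 { addr: [1, 1, 1, 1], port: 9735 };
    assert_eq!(filter_addresses(Some(public_addr.clone())), Some(public_addr));
    // ...while loopback, RFC 1918 and other non-routable ranges are dropped.
    assert_eq!(filter_addresses(Some(NetAddress::IPv4 { addr: [192, 168, 1, 2], port: 9735 })), None);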
 impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, L: Deref, CMH: Deref> PeerManager<Descriptor, CM, RM, L, CMH> where
                CM::Target: ChannelMessageHandler,
                RM::Target: RoutingMessageHandler,
@@ -543,7 +578,13 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, L: Deref, CMH: Deref> P
                SecretKey::from_slice(&Sha256::from_engine(ephemeral_hash).into_inner()).expect("You broke SHA-256!")
        }
 
-       /// Indicates a new outbound connection has been established to a node with the given node_id.
+       /// Indicates a new outbound connection has been established to a node with the given node_id
+       /// and an optional remote network address.
+       ///
+       /// If a `remote_network_address` is provided, it is sent back to this peer in our init
+       /// message, allowing the peer to learn the address we observe for it. Pass the network
+       /// address of the host this descriptor is connected to, if it is known.
+       ///
        /// Note that if an Err is returned here you MUST NOT call socket_disconnected for the new
        /// descriptor but must disconnect the connection immediately.
        ///
@@ -553,7 +594,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, L: Deref, CMH: Deref> P
        /// [`socket_disconnected()`].
        ///
        /// [`socket_disconnected()`]: PeerManager::socket_disconnected
-       pub fn new_outbound_connection(&self, their_node_id: PublicKey, descriptor: Descriptor) -> Result<Vec<u8>, PeerHandleError> {
+       pub fn new_outbound_connection(&self, their_node_id: PublicKey, descriptor: Descriptor, remote_network_address: Option<NetAddress>) -> Result<Vec<u8>, PeerHandleError> {
                let mut peer_encryptor = PeerChannelEncryptor::new_outbound(their_node_id.clone(), self.get_ephemeral_key());
                let res = peer_encryptor.get_act_one().to_vec();
                let pending_read_buffer = [0; 50].to_vec(); // Noise act two is 50 bytes
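A hedged call-site sketch, assuming `descriptor` is the application's `SocketDescriptor` implementation and the peer's socket address is known (the address literal is illustrative):

    // The returned bytes are noise act one and must be written to the socket first.
    // The address is run through filter_addresses(), so loopback and private ranges
    // are silently omitted from the init message we later send to this peer.
    let act_one = peer_manager.new_outbound_connection(
        their_node_id,
        descriptor.clone(),
        Some(NetAddress::IPv4 { addr: [203, 0, 113, 7], port: 9735 }),
    ).unwrap();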
@@ -563,6 +604,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, L: Deref, CMH: Deref> P
                        channel_encryptor: peer_encryptor,
                        their_node_id: None,
                        their_features: None,
+                       their_net_address: remote_network_address,
 
                        pending_outbound_buffer: LinkedList::new(),
                        pending_outbound_buffer_first_msg_offset: 0,
@@ -583,7 +625,12 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, L: Deref, CMH: Deref> P
                Ok(res)
        }
 
-       /// Indicates a new inbound connection has been established.
+       /// Indicates a new inbound connection has been established, optionally with the peer's
+       /// remote network address.
+       ///
+       /// If a `remote_network_address` is provided, it is sent back to this peer in our init
+       /// message, allowing the peer to learn the address we observe for it. Pass the network
+       /// address of the host this descriptor is connected to, if it is known.
        ///
        /// May refuse the connection by returning an Err, but will never write bytes to the remote end
        /// (outbound connector always speaks first). Note that if an Err is returned here you MUST NOT
@@ -594,7 +641,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, L: Deref, CMH: Deref> P
        /// [`socket_disconnected()`].
        ///
        /// [`socket_disconnected()`]: PeerManager::socket_disconnected
-       pub fn new_inbound_connection(&self, descriptor: Descriptor) -> Result<(), PeerHandleError> {
+       pub fn new_inbound_connection(&self, descriptor: Descriptor, remote_network_address: Option<NetAddress>) -> Result<(), PeerHandleError> {
                let peer_encryptor = PeerChannelEncryptor::new_inbound(&self.our_node_secret);
                let pending_read_buffer = [0; 50].to_vec(); // Noise act one is 50 bytes
 
@@ -603,6 +650,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, L: Deref, CMH: Deref> P
                        channel_encryptor: peer_encryptor,
                        their_node_id: None,
                        their_features: None,
+                       their_net_address: remote_network_address,
 
                        pending_outbound_buffer: LinkedList::new(),
                        pending_outbound_buffer_first_msg_offset: 0,
@@ -864,7 +912,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, L: Deref, CMH: Deref> P
                                                                        peer.their_node_id = Some(their_node_id);
                                                                        insert_node_id!();
                                                                        let features = InitFeatures::known();
-                                                                       let resp = msgs::Init { features };
+                                                                       let resp = msgs::Init { features, remote_network_address: filter_addresses(peer.their_net_address.clone())};
                                                                        self.enqueue_message(peer, &resp);
                                                                        peer.awaiting_pong_timer_tick_intervals = 0;
                                                                },
@@ -875,7 +923,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, L: Deref, CMH: Deref> P
                                                                        peer.their_node_id = Some(their_node_id);
                                                                        insert_node_id!();
                                                                        let features = InitFeatures::known();
-                                                                       let resp = msgs::Init { features };
+                                                                       let resp = msgs::Init { features, remote_network_address: filter_addresses(peer.their_net_address.clone())};
                                                                        self.enqueue_message(peer, &resp);
                                                                        peer.awaiting_pong_timer_tick_intervals = 0;
                                                                },
@@ -1018,7 +1066,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, L: Deref, CMH: Deref> P
                                        return Err(PeerHandleError{ no_connection_possible: true }.into());
                                }
 
-                               self.message_handler.route_handler.sync_routing_table(&peer.their_node_id.unwrap(), &msg);
+                               self.message_handler.route_handler.peer_connected(&peer.their_node_id.unwrap(), &msg);
 
                                self.message_handler.chan_handler.peer_connected(&peer.their_node_id.unwrap(), &msg);
                                peer.their_features = Some(msg.features);
@@ -1477,6 +1525,9 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, L: Deref, CMH: Deref> P
                                                        msg.sync_complete);
                                                self.enqueue_message(get_peer_for_forwarding!(node_id), msg);
                                        }
+                                       MessageSendEvent::SendGossipTimestampFilter { ref node_id, ref msg } => {
+                                               self.enqueue_message(get_peer_for_forwarding!(node_id), msg);
+                                       }
                                }
                        }
 
@@ -1663,8 +1714,9 @@ fn is_gossip_msg(type_id: u16) -> bool {
 
 #[cfg(test)]
 mod tests {
-       use ln::peer_handler::{PeerManager, MessageHandler, SocketDescriptor, IgnoringMessageHandler};
+       use ln::peer_handler::{PeerManager, MessageHandler, SocketDescriptor, IgnoringMessageHandler, filter_addresses};
        use ln::msgs;
+       use ln::msgs::NetAddress;
        use util::events;
        use util::test_utils;
 
@@ -1740,8 +1792,8 @@ mod tests {
                let a_id = PublicKey::from_secret_key(&secp_ctx, &peer_a.our_node_secret);
                let mut fd_a = FileDescriptor { fd: 1, outbound_data: Arc::new(Mutex::new(Vec::new())) };
                let mut fd_b = FileDescriptor { fd: 1, outbound_data: Arc::new(Mutex::new(Vec::new())) };
-               let initial_data = peer_b.new_outbound_connection(a_id, fd_b.clone()).unwrap();
-               peer_a.new_inbound_connection(fd_a.clone()).unwrap();
+               let initial_data = peer_b.new_outbound_connection(a_id, fd_b.clone(), None).unwrap();
+               peer_a.new_inbound_connection(fd_a.clone(), None).unwrap();
                assert_eq!(peer_a.read_event(&mut fd_a, &initial_data).unwrap(), false);
                peer_a.process_events();
                assert_eq!(peer_b.read_event(&mut fd_b, &fd_a.outbound_data.lock().unwrap().split_off(0)).unwrap(), false);
@@ -1848,8 +1900,8 @@ mod tests {
                let a_id = PublicKey::from_secret_key(&secp_ctx, &peers[0].our_node_secret);
                let mut fd_a = FileDescriptor { fd: 1, outbound_data: Arc::new(Mutex::new(Vec::new())) };
                let mut fd_b = FileDescriptor { fd: 1, outbound_data: Arc::new(Mutex::new(Vec::new())) };
-               let initial_data = peers[1].new_outbound_connection(a_id, fd_b.clone()).unwrap();
-               peers[0].new_inbound_connection(fd_a.clone()).unwrap();
+               let initial_data = peers[1].new_outbound_connection(a_id, fd_b.clone(), None).unwrap();
+               peers[0].new_inbound_connection(fd_a.clone(), None).unwrap();
 
                // If we get a single timer tick before completion, that's fine
                assert_eq!(peers[0].peers.lock().unwrap().peers.len(), 1);
@@ -1867,4 +1919,100 @@ mod tests {
 
                assert!(peers[0].read_event(&mut fd_a, &fd_b.outbound_data.lock().unwrap().split_off(0)).is_err());
        }
+
+       #[test]
+       fn test_filter_addresses(){
+               // Tests the filter_addresses function.
+
+               // For (10/8)
+               let ip_address = NetAddress::IPv4{addr: [10, 0, 0, 0], port: 1000};
+               assert_eq!(filter_addresses(Some(ip_address.clone())), None);
+               let ip_address = NetAddress::IPv4{addr: [10, 0, 255, 201], port: 1000};
+               assert_eq!(filter_addresses(Some(ip_address.clone())), None);
+               let ip_address = NetAddress::IPv4{addr: [10, 255, 255, 255], port: 1000};
+               assert_eq!(filter_addresses(Some(ip_address.clone())), None);
+
+               // For (0/8)
+               let ip_address = NetAddress::IPv4{addr: [0, 0, 0, 0], port: 1000};
+               assert_eq!(filter_addresses(Some(ip_address.clone())), None);
+               let ip_address = NetAddress::IPv4{addr: [0, 0, 255, 187], port: 1000};
+               assert_eq!(filter_addresses(Some(ip_address.clone())), None);
+               let ip_address = NetAddress::IPv4{addr: [0, 255, 255, 255], port: 1000};
+               assert_eq!(filter_addresses(Some(ip_address.clone())), None);
+
+               // For (100.64/10)
+               let ip_address = NetAddress::IPv4{addr: [100, 64, 0, 0], port: 1000};
+               assert_eq!(filter_addresses(Some(ip_address.clone())), None);
+               let ip_address = NetAddress::IPv4{addr: [100, 78, 255, 0], port: 1000};
+               assert_eq!(filter_addresses(Some(ip_address.clone())), None);
+               let ip_address = NetAddress::IPv4{addr: [100, 127, 255, 255], port: 1000};
+               assert_eq!(filter_addresses(Some(ip_address.clone())), None);
+
+               // For (127/8)
+               let ip_address = NetAddress::IPv4{addr: [127, 0, 0, 0], port: 1000};
+               assert_eq!(filter_addresses(Some(ip_address.clone())), None);
+               let ip_address = NetAddress::IPv4{addr: [127, 65, 73, 0], port: 1000};
+               assert_eq!(filter_addresses(Some(ip_address.clone())), None);
+               let ip_address = NetAddress::IPv4{addr: [127, 255, 255, 255], port: 1000};
+               assert_eq!(filter_addresses(Some(ip_address.clone())), None);
+
+               // For (169.254/16)
+               let ip_address = NetAddress::IPv4{addr: [169, 254, 0, 0], port: 1000};
+               assert_eq!(filter_addresses(Some(ip_address.clone())), None);
+               let ip_address = NetAddress::IPv4{addr: [169, 254, 221, 101], port: 1000};
+               assert_eq!(filter_addresses(Some(ip_address.clone())), None);
+               let ip_address = NetAddress::IPv4{addr: [169, 254, 255, 255], port: 1000};
+               assert_eq!(filter_addresses(Some(ip_address.clone())), None);
+
+               // For (172.16/12)
+               let ip_address = NetAddress::IPv4{addr: [172, 16, 0, 0], port: 1000};
+               assert_eq!(filter_addresses(Some(ip_address.clone())), None);
+               let ip_address = NetAddress::IPv4{addr: [172, 27, 101, 23], port: 1000};
+               assert_eq!(filter_addresses(Some(ip_address.clone())), None);
+               let ip_address = NetAddress::IPv4{addr: [172, 31, 255, 255], port: 1000};
+               assert_eq!(filter_addresses(Some(ip_address.clone())), None);
+
+               // For (192.168/16)
+               let ip_address = NetAddress::IPv4{addr: [192, 168, 0, 0], port: 1000};
+               assert_eq!(filter_addresses(Some(ip_address.clone())), None);
+               let ip_address = NetAddress::IPv4{addr: [192, 168, 205, 159], port: 1000};
+               assert_eq!(filter_addresses(Some(ip_address.clone())), None);
+               let ip_address = NetAddress::IPv4{addr: [192, 168, 255, 255], port: 1000};
+               assert_eq!(filter_addresses(Some(ip_address.clone())), None);
+
+               // For (192.88.99/24)
+               let ip_address = NetAddress::IPv4{addr: [192, 88, 99, 0], port: 1000};
+               assert_eq!(filter_addresses(Some(ip_address.clone())), None);
+               let ip_address = NetAddress::IPv4{addr: [192, 88, 99, 140], port: 1000};
+               assert_eq!(filter_addresses(Some(ip_address.clone())), None);
+               let ip_address = NetAddress::IPv4{addr: [192, 88, 99, 255], port: 1000};
+               assert_eq!(filter_addresses(Some(ip_address.clone())), None);
+
+               // For other IPv4 addresses
+               let ip_address = NetAddress::IPv4{addr: [188, 255, 99, 0], port: 1000};
+               assert_eq!(filter_addresses(Some(ip_address.clone())), Some(ip_address.clone()));
+               let ip_address = NetAddress::IPv4{addr: [123, 8, 129, 14], port: 1000};
+               assert_eq!(filter_addresses(Some(ip_address.clone())), Some(ip_address.clone()));
+               let ip_address = NetAddress::IPv4{addr: [2, 88, 9, 255], port: 1000};
+               assert_eq!(filter_addresses(Some(ip_address.clone())), Some(ip_address.clone()));
+
+               // For (2000::/3)
+               let ip_address = NetAddress::IPv6{addr: [32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], port: 1000};
+               assert_eq!(filter_addresses(Some(ip_address.clone())), Some(ip_address.clone()));
+               let ip_address = NetAddress::IPv6{addr: [45, 34, 209, 190, 0, 123, 55, 34, 0, 0, 3, 27, 201, 0, 0, 0], port: 1000};
+               assert_eq!(filter_addresses(Some(ip_address.clone())), Some(ip_address.clone()));
+               let ip_address = NetAddress::IPv6{addr: [63, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255], port: 1000};
+               assert_eq!(filter_addresses(Some(ip_address.clone())), Some(ip_address.clone()));
+
+               // For other IPv6 addresses (outside 2000::/3, and thus filtered)
+               let ip_address = NetAddress::IPv6{addr: [24, 240, 12, 32, 0, 0, 0, 0, 20, 97, 0, 32, 121, 254, 0, 0], port: 1000};
+               assert_eq!(filter_addresses(Some(ip_address.clone())), None);
+               let ip_address = NetAddress::IPv6{addr: [68, 23, 56, 63, 0, 0, 2, 7, 75, 109, 0, 39, 0, 0, 0, 0], port: 1000};
+               assert_eq!(filter_addresses(Some(ip_address.clone())), None);
+               let ip_address = NetAddress::IPv6{addr: [101, 38, 140, 230, 100, 0, 30, 98, 0, 26, 0, 0, 57, 96, 0, 0], port: 1000};
+               assert_eq!(filter_addresses(Some(ip_address.clone())), None);
+
+               // For (None)
+               assert_eq!(filter_addresses(None), None);
+       }
 }
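
The assertions above pin down the address-filtering policy: IPv4 addresses in well-known non-routable ranges and IPv6 addresses outside 2000::/3 are dropped, everything else is kept. A minimal standalone sketch of that policy (an illustrative helper, not the crate's private filter_addresses; it assumes ln::msgs::NetAddress is in scope and simply passes through variants the test does not exercise) could look like:

        fn filter_addresses_sketch(ip_address: Option<NetAddress>) -> Option<NetAddress> {
                let routable = match &ip_address {
                        Some(NetAddress::IPv4 { addr, .. }) => {
                                // Reject 10/8, 0/8, 100.64/10, 127/8, 169.254/16, 172.16/12,
                                // 192.168/16 and 192.88.99/24; keep everything else.
                                let reserved = addr[0] == 10
                                        || addr[0] == 0
                                        || (addr[0] == 100 && addr[1] >= 64 && addr[1] <= 127)
                                        || addr[0] == 127
                                        || (addr[0] == 169 && addr[1] == 254)
                                        || (addr[0] == 172 && addr[1] >= 16 && addr[1] <= 31)
                                        || (addr[0] == 192 && addr[1] == 168)
                                        || (addr[0] == 192 && addr[1] == 88 && addr[2] == 99);
                                !reserved
                        },
                        // Keep only globally-routable IPv6 unicast space (2000::/3).
                        Some(NetAddress::IPv6 { addr, .. }) => addr[0] >= 0x20 && addr[0] <= 0x3f,
                        // Variants not covered by the test (e.g. onion addresses) are passed
                        // through unchanged in this sketch.
                        Some(_) => true,
                        None => false,
                };
                if routable { ip_address } else { None }
        }
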
index 996d64f377cc8e4a60bef0c152cfe0e13ab0f8bd..d3d54467a2f421551d381be64e56d76c00f639cd 100644 (file)
@@ -138,8 +138,8 @@ fn test_priv_forwarding_rejection() {
        check_added_monitors!(nodes[1], 2);
        nodes[1].node = &nodes_1_deserialized;
 
-       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::known() });
-       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
+       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::known(), remote_network_address: None });
+       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
        let as_reestablish = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id());
        let bs_reestablish = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
        nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reestablish);
@@ -147,8 +147,8 @@ fn test_priv_forwarding_rejection() {
        get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id());
        get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id());
 
-       nodes[1].node.peer_connected(&nodes[2].node.get_our_node_id(), &msgs::Init { features: InitFeatures::known() });
-       nodes[2].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
+       nodes[1].node.peer_connected(&nodes[2].node.get_our_node_id(), &msgs::Init { features: InitFeatures::known(), remote_network_address: None });
+       nodes[2].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
        let bs_reestablish = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[2].node.get_our_node_id());
        let cs_reestablish = get_event_msg!(nodes[2], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id());
        nodes[2].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish);
index cd0f77448a5d5c2c587286dcc7ec9612d5c9ad8c..557aff84c0dad7c6d595e4a6da6f1bcf24d6fced 100644 (file)
@@ -245,9 +245,9 @@ fn do_test_shutdown_rebroadcast(recv_count: u8) {
        nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
        nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
 
-       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
+       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
        let node_0_reestablish = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id());
-       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
+       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
        let node_1_reestablish = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
 
        nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &node_0_reestablish);
@@ -305,9 +305,9 @@ fn do_test_shutdown_rebroadcast(recv_count: u8) {
        nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
        nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
 
-       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
+       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
        let node_1_2nd_reestablish = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
-       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
+       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
        if recv_count == 0 {
                // If all closing_signeds weren't delivered we can just resume where we left off...
                let node_0_2nd_reestablish = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id());
index d113acbfc7d4d5e50a17d2bea7012f67fa845285..e7db446a86f17f036ceeccba9aca52c4aa7ea652 100644 (file)
@@ -505,7 +505,7 @@ mod tests {
                let mut reader = io::Cursor::new(buffer);
                let decoded_msg = read(&mut reader, &IgnoringMessageHandler{}).unwrap();
                match decoded_msg {
-                       Message::Init(msgs::Init { features }) => {
+                       Message::Init(msgs::Init { features, .. }) => {
                                assert!(features.supports_variable_length_onion());
                                assert!(features.supports_upfront_shutdown_script());
                                assert!(features.supports_gossip_queries());
index a5e497bb1cf0f279137fd3f5e923e8972317181d..282c87051dd48f2c3b3a82db778cc20a78f7e2c5 100644 (file)
@@ -25,7 +25,7 @@ use chain;
 use chain::Access;
 use ln::features::{ChannelFeatures, NodeFeatures};
 use ln::msgs::{DecodeError, ErrorAction, Init, LightningError, RoutingMessageHandler, NetAddress, MAX_VALUE_MSAT};
-use ln::msgs::{ChannelAnnouncement, ChannelUpdate, NodeAnnouncement, OptionalField};
+use ln::msgs::{ChannelAnnouncement, ChannelUpdate, NodeAnnouncement, OptionalField, GossipTimestampFilter};
 use ln::msgs::{QueryChannelRange, ReplyChannelRange, QueryShortChannelIds, ReplyShortChannelIdsEnd};
 use ln::msgs;
 use util::ser::{Writeable, Readable, Writer};
@@ -395,74 +395,97 @@ where C::Target: chain::Access, L::Target: Logger
        /// to request gossip messages for each channel. The sync is considered complete
        /// when the final reply_scids_end message is received, though we are not
        /// tracking this directly.
-       fn sync_routing_table(&self, their_node_id: &PublicKey, init_msg: &Init) {
-
+       fn peer_connected(&self, their_node_id: &PublicKey, init_msg: &Init) {
                // We will only perform a sync with peers that support gossip_queries.
                if !init_msg.features.supports_gossip_queries() {
                        return ();
                }
 
-               // Check if we need to perform a full synchronization with this peer
-               if !self.should_request_full_sync(&their_node_id) {
-                       return ();
+               // The lightning network's gossip sync system is completely broken in numerous ways.
+               //
+               // Given no broadly-available set-reconciliation protocol, the only reasonable approach is
+               // to do a full sync from the first few peers we connect to, and then receive gossip
+               // updates from all our peers normally.
+               //
+               // Originally, we could simply tell a peer to dump us the entire gossip table on startup,
+               // wasting lots of bandwidth but ensuring we have the full network graph. After the initial
+               // dump peers would always send gossip and we'd stay up-to-date with whatever our peer has
+               // seen.
+               //
+               // In order to reduce the bandwidth waste, "gossip queries" were introduced, allowing you
+               // to ask for the SCIDs of all channels in your peer's routing graph, and then only request
+               // channel data which you are missing. Except there was no way at all to identify which
+               // `channel_update`s you were missing, so you still had to request everything, just in a
+               // very complicated way with some queries instead of just getting the dump.
+               //
+               // Later, an option was added to fetch the latest timestamps of the `channel_update`s to
+               // make efficient sync possible; however, it has yet to be implemented in lnd, which makes
+               // relying on it useless.
+               //
+               // After gossip queries were introduced, support for receiving a full gossip table dump on
+               // connection was removed from several nodes, making it impossible to get a full sync
+               // without using the "gossip queries" messages.
+               //
+               // Once you opt into "gossip queries", the only way to receive any gossip updates that a
+               // peer receives after you connect is to send a `gossip_timestamp_filter` message. This
+               // message, as the name implies, tells the peer to not forward any gossip messages with a
+               // timestamp older than a given value (not the time the peer received the filter, but the
+               // timestamp in the update message, which is often hours behind when the peer received the
+               // message).
+               //
+               // Obnoxiously, `gossip_timestamp_filter` isn't *just* a filter, but it's also a request for
+               // your peer to send you the full routing graph (subject to the filter). Thus, in order to
+               // tell a peer to send you any updates as it sees them, you have to also ask for the full
+               // routing graph to be synced. If you set a timestamp filter near the current time, peers
+               // will simply not forward to you any updates they see which were generated some time
+               // ago (which is not uncommon). If you instead set a timestamp filter near 0 (or two weeks
+               // ago), you will always get the full routing graph from all your peers.
+               //
+               // Most lightning nodes today opt to simply turn off receiving gossip data which only
+               // propagated some time after it was generated, and, worse, often disable gossiping with
+               // several peers after their first connection. The second behavior can cause gossip to not
+               // propagate fully if there are cuts in the gossiping subgraph.
+               //
+               // In an attempt to cut a middle ground between always fetching the full graph from all of
+               // our peers and never receiving gossip from peers at all, we send all of our peers a
+               // `gossip_timestamp_filter`, with the filter time set either two weeks ago or an hour ago.
+               //
+               // For no-std builds, we bury our head in the sand and do a full sync on each connection.
+               let should_request_full_sync = self.should_request_full_sync(&their_node_id);
+               #[allow(unused_mut, unused_assignments)]
+               let mut gossip_start_time = 0;
+               #[cfg(feature = "std")]
+               {
+                       gossip_start_time = SystemTime::now().duration_since(UNIX_EPOCH).expect("Time must be > 1970").as_secs();
+                       if should_request_full_sync {
+                               gossip_start_time -= 60 * 60 * 24 * 7 * 2; // 2 weeks ago
+                       } else {
+                               gossip_start_time -= 60 * 60; // an hour ago
+                       }
                }
 
-               let first_blocknum = 0;
-               let number_of_blocks = 0xffffffff;
-               log_debug!(self.logger, "Sending query_channel_range peer={}, first_blocknum={}, number_of_blocks={}", log_pubkey!(their_node_id), first_blocknum, number_of_blocks);
                let mut pending_events = self.pending_events.lock().unwrap();
-               pending_events.push(MessageSendEvent::SendChannelRangeQuery {
+               pending_events.push(MessageSendEvent::SendGossipTimestampFilter {
                        node_id: their_node_id.clone(),
-                       msg: QueryChannelRange {
+                       msg: GossipTimestampFilter {
                                chain_hash: self.network_graph.genesis_hash,
-                               first_blocknum,
-                               number_of_blocks,
+                               first_timestamp: gossip_start_time as u32, // 2106 issue!
+                               timestamp_range: u32::max_value(),
                        },
                });
        }
 
-       /// Statelessly processes a reply to a channel range query by immediately
-       /// sending an SCID query with SCIDs in the reply. To keep this handler
-       /// stateless, it does not validate the sequencing of replies for multi-
-       /// reply ranges. It does not validate whether the reply(ies) cover the
-       /// queried range. It also does not filter SCIDs to only those in the
-       /// original query range. We also do not validate that the chain_hash
-       /// matches the chain_hash of the NetworkGraph. Any chan_ann message that
-       /// does not match our chain_hash will be rejected when the announcement is
-       /// processed.
-       fn handle_reply_channel_range(&self, their_node_id: &PublicKey, msg: ReplyChannelRange) -> Result<(), LightningError> {
-               log_debug!(self.logger, "Handling reply_channel_range peer={}, first_blocknum={}, number_of_blocks={}, sync_complete={}, scids={}", log_pubkey!(their_node_id), msg.first_blocknum, msg.number_of_blocks, msg.sync_complete, msg.short_channel_ids.len(),);
-
-               log_debug!(self.logger, "Sending query_short_channel_ids peer={}, batch_size={}", log_pubkey!(their_node_id), msg.short_channel_ids.len());
-               let mut pending_events = self.pending_events.lock().unwrap();
-               pending_events.push(MessageSendEvent::SendShortIdsQuery {
-                       node_id: their_node_id.clone(),
-                       msg: QueryShortChannelIds {
-                               chain_hash: msg.chain_hash,
-                               short_channel_ids: msg.short_channel_ids,
-                       }
-               });
-
+       fn handle_reply_channel_range(&self, _their_node_id: &PublicKey, _msg: ReplyChannelRange) -> Result<(), LightningError> {
+               // We don't make queries, so should never receive replies. If, in the future, the set
+               // reconciliation extensions to gossip queries become broadly supported, we should revert
+               // this code to its state pre-0.0.106.
                Ok(())
        }
 
-       /// When an SCID query is initiated the remote peer will begin streaming
-       /// gossip messages. In the event of a failure, we may have received
-       /// some channel information. Before trying with another peer, the
-       /// caller should update its set of SCIDs that need to be queried.
-       fn handle_reply_short_channel_ids_end(&self, their_node_id: &PublicKey, msg: ReplyShortChannelIdsEnd) -> Result<(), LightningError> {
-               log_debug!(self.logger, "Handling reply_short_channel_ids_end peer={}, full_information={}", log_pubkey!(their_node_id), msg.full_information);
-
-               // If the remote node does not have up-to-date information for the
-               // chain_hash they will set full_information=false. We can fail
-               // the result and try again with a different peer.
-               if !msg.full_information {
-                       return Err(LightningError {
-                               err: String::from("Received reply_short_channel_ids_end with no information"),
-                               action: ErrorAction::IgnoreError
-                       });
-               }
-
+       fn handle_reply_short_channel_ids_end(&self, _their_node_id: &PublicKey, _msg: ReplyShortChannelIdsEnd) -> Result<(), LightningError> {
+               // We don't make queries, so should never receive replies. If, in the future, the set
+               // reconciliation extensions to gossip queries become broadly supported, we should revert
+               // this code to its state pre-0.0.106.
                Ok(())
        }
 
@@ -1362,8 +1385,8 @@ impl NetworkGraph {
                                                }
                                        }
                                }
-                               macro_rules! maybe_update_channel_info {
-                                       ( $target: expr, $src_node: expr) => {
+                               macro_rules! check_update_latest {
+                                       ($target: expr) => {
                                                if let Some(existing_chan_info) = $target.as_ref() {
                                                        // The timestamp field is somewhat of a misnomer - the BOLTs use it to
                                                        // order updates to ensure you always have the latest one, only
@@ -1380,7 +1403,11 @@ impl NetworkGraph {
                                                } else {
                                                        chan_was_enabled = false;
                                                }
+                                       }
+                               }
 
+                               macro_rules! get_new_channel_info {
+                                       () => { {
                                                let last_update_message = if msg.excess_data.len() <= MAX_EXCESS_BYTES_FOR_RELAY
                                                        { full_msg.cloned() } else { None };
 
@@ -1396,29 +1423,31 @@ impl NetworkGraph {
                                                        },
                                                        last_update_message
                                                };
-                                               $target = Some(updated_channel_update_info);
-                                       }
+                                               Some(updated_channel_update_info)
+                                       } }
                                }
 
                                let msg_hash = hash_to_message!(&Sha256dHash::hash(&msg.encode()[..])[..]);
                                if msg.flags & 1 == 1 {
                                        dest_node_id = channel.node_one.clone();
+                                       check_update_latest!(channel.two_to_one);
                                        if let Some((sig, ctx)) = sig_info {
                                                secp_verify_sig!(ctx, &msg_hash, &sig, &PublicKey::from_slice(channel.node_two.as_slice()).map_err(|_| LightningError{
                                                        err: "Couldn't parse source node pubkey".to_owned(),
                                                        action: ErrorAction::IgnoreAndLog(Level::Debug)
                                                })?, "channel_update");
                                        }
-                                       maybe_update_channel_info!(channel.two_to_one, channel.node_two);
+                                       channel.two_to_one = get_new_channel_info!();
                                } else {
                                        dest_node_id = channel.node_two.clone();
+                                       check_update_latest!(channel.one_to_two);
                                        if let Some((sig, ctx)) = sig_info {
                                                secp_verify_sig!(ctx, &msg_hash, &sig, &PublicKey::from_slice(channel.node_one.as_slice()).map_err(|_| LightningError{
                                                        err: "Couldn't parse destination node pubkey".to_owned(),
                                                        action: ErrorAction::IgnoreAndLog(Level::Debug)
                                                })?, "channel_update");
                                        }
-                                       maybe_update_channel_info!(channel.one_to_two, channel.node_one);
+                                       channel.one_to_two = get_new_channel_info!();
                                }
                        }
                }
@@ -1521,7 +1550,7 @@ mod tests {
        use routing::network_graph::{NetGraphMsgHandler, NetworkGraph, NetworkUpdate, MAX_EXCESS_BYTES_FOR_RELAY};
        use ln::msgs::{Init, OptionalField, RoutingMessageHandler, UnsignedNodeAnnouncement, NodeAnnouncement,
                UnsignedChannelAnnouncement, ChannelAnnouncement, UnsignedChannelUpdate, ChannelUpdate, 
-               ReplyChannelRange, ReplyShortChannelIdsEnd, QueryChannelRange, QueryShortChannelIds, MAX_VALUE_MSAT};
+               ReplyChannelRange, QueryChannelRange, QueryShortChannelIds, MAX_VALUE_MSAT};
        use util::test_utils;
        use util::logger::Logger;
        use util::ser::{Readable, Writeable};
@@ -2258,140 +2287,43 @@ mod tests {
        }
 
        #[test]
+       #[cfg(feature = "std")]
        fn calling_sync_routing_table() {
+               use std::time::{SystemTime, UNIX_EPOCH};
+
                let network_graph = create_network_graph();
                let (secp_ctx, net_graph_msg_handler) = create_net_graph_msg_handler(&network_graph);
                let node_privkey_1 = &SecretKey::from_slice(&[42; 32]).unwrap();
                let node_id_1 = PublicKey::from_secret_key(&secp_ctx, node_privkey_1);
 
                let chain_hash = genesis_block(Network::Testnet).header.block_hash();
-               let first_blocknum = 0;
-               let number_of_blocks = 0xffff_ffff;
 
                // It should ignore if gossip_queries feature is not enabled
                {
-                       let init_msg = Init { features: InitFeatures::known().clear_gossip_queries() };
-                       net_graph_msg_handler.sync_routing_table(&node_id_1, &init_msg);
+                       let init_msg = Init { features: InitFeatures::known().clear_gossip_queries(), remote_network_address: None };
+                       net_graph_msg_handler.peer_connected(&node_id_1, &init_msg);
                        let events = net_graph_msg_handler.get_and_clear_pending_msg_events();
                        assert_eq!(events.len(), 0);
                }
 
-               // It should send a query_channel_message with the correct information
+               // It should send a gossip_timestamp_filter with the correct information
                {
-                       let init_msg = Init { features: InitFeatures::known() };
-                       net_graph_msg_handler.sync_routing_table(&node_id_1, &init_msg);
+                       let init_msg = Init { features: InitFeatures::known(), remote_network_address: None };
+                       net_graph_msg_handler.peer_connected(&node_id_1, &init_msg);
                        let events = net_graph_msg_handler.get_and_clear_pending_msg_events();
                        assert_eq!(events.len(), 1);
                        match &events[0] {
-                               MessageSendEvent::SendChannelRangeQuery{ node_id, msg } => {
+                               MessageSendEvent::SendGossipTimestampFilter{ node_id, msg } => {
                                        assert_eq!(node_id, &node_id_1);
                                        assert_eq!(msg.chain_hash, chain_hash);
-                                       assert_eq!(msg.first_blocknum, first_blocknum);
-                                       assert_eq!(msg.number_of_blocks, number_of_blocks);
+                                       let expected_timestamp = SystemTime::now().duration_since(UNIX_EPOCH).expect("Time must be > 1970").as_secs();
+                                       assert!((msg.first_timestamp as u64) >= expected_timestamp - 60*60*24*7*2);
+                                       assert!((msg.first_timestamp as u64) < expected_timestamp - 60*60*24*7*2 + 10);
+                                       assert_eq!(msg.timestamp_range, u32::max_value());
                                },
                                _ => panic!("Expected MessageSendEvent::SendChannelRangeQuery")
                        };
                }
-
-               // It should not enqueue a query when should_request_full_sync return false.
-               // The initial implementation allows syncing with the first 5 peers after
-               // which should_request_full_sync will return false
-               {
-                       let network_graph = create_network_graph();
-                       let (secp_ctx, net_graph_msg_handler) = create_net_graph_msg_handler(&network_graph);
-                       let init_msg = Init { features: InitFeatures::known() };
-                       for n in 1..7 {
-                               let node_privkey = &SecretKey::from_slice(&[n; 32]).unwrap();
-                               let node_id = PublicKey::from_secret_key(&secp_ctx, node_privkey);
-                               net_graph_msg_handler.sync_routing_table(&node_id, &init_msg);
-                               let events = net_graph_msg_handler.get_and_clear_pending_msg_events();
-                               if n <= 5 {
-                                       assert_eq!(events.len(), 1);
-                               } else {
-                                       assert_eq!(events.len(), 0);
-                               }
-
-                       }
-               }
-       }
-
-       #[test]
-       fn handling_reply_channel_range() {
-               let network_graph = create_network_graph();
-               let (secp_ctx, net_graph_msg_handler) = create_net_graph_msg_handler(&network_graph);
-               let node_privkey_1 = &SecretKey::from_slice(&[42; 32]).unwrap();
-               let node_id_1 = PublicKey::from_secret_key(&secp_ctx, node_privkey_1);
-
-               let chain_hash = genesis_block(Network::Testnet).header.block_hash();
-
-               // Test receipt of a single reply that should enqueue an SCID query
-               // matching the SCIDs in the reply
-               {
-                       let result = net_graph_msg_handler.handle_reply_channel_range(&node_id_1, ReplyChannelRange {
-                               chain_hash,
-                               sync_complete: true,
-                               first_blocknum: 0,
-                               number_of_blocks: 2000,
-                               short_channel_ids: vec![
-                                       0x0003e0_000000_0000, // 992x0x0
-                                       0x0003e8_000000_0000, // 1000x0x0
-                                       0x0003e9_000000_0000, // 1001x0x0
-                                       0x0003f0_000000_0000, // 1008x0x0
-                                       0x00044c_000000_0000, // 1100x0x0
-                                       0x0006e0_000000_0000, // 1760x0x0
-                               ],
-                       });
-                       assert!(result.is_ok());
-
-                       // We expect to emit a query_short_channel_ids message with the received scids
-                       let events = net_graph_msg_handler.get_and_clear_pending_msg_events();
-                       assert_eq!(events.len(), 1);
-                       match &events[0] {
-                               MessageSendEvent::SendShortIdsQuery { node_id, msg } => {
-                                       assert_eq!(node_id, &node_id_1);
-                                       assert_eq!(msg.chain_hash, chain_hash);
-                                       assert_eq!(msg.short_channel_ids, vec![
-                                               0x0003e0_000000_0000, // 992x0x0
-                                               0x0003e8_000000_0000, // 1000x0x0
-                                               0x0003e9_000000_0000, // 1001x0x0
-                                               0x0003f0_000000_0000, // 1008x0x0
-                                               0x00044c_000000_0000, // 1100x0x0
-                                               0x0006e0_000000_0000, // 1760x0x0
-                                       ]);
-                               },
-                               _ => panic!("expected MessageSendEvent::SendShortIdsQuery"),
-                       }
-               }
-       }
-
-       #[test]
-       fn handling_reply_short_channel_ids() {
-               let network_graph = create_network_graph();
-               let (secp_ctx, net_graph_msg_handler) = create_net_graph_msg_handler(&network_graph);
-               let node_privkey = &SecretKey::from_slice(&[41; 32]).unwrap();
-               let node_id = PublicKey::from_secret_key(&secp_ctx, node_privkey);
-
-               let chain_hash = genesis_block(Network::Testnet).header.block_hash();
-
-               // Test receipt of a successful reply
-               {
-                       let result = net_graph_msg_handler.handle_reply_short_channel_ids_end(&node_id, ReplyShortChannelIdsEnd {
-                               chain_hash,
-                               full_information: true,
-                       });
-                       assert!(result.is_ok());
-               }
-
-               // Test receipt of a reply that indicates the peer does not maintain up-to-date information
-               // for the chain_hash requested in the query.
-               {
-                       let result = net_graph_msg_handler.handle_reply_short_channel_ids_end(&node_id, ReplyShortChannelIdsEnd {
-                               chain_hash,
-                               full_information: false,
-                       });
-                       assert!(result.is_err());
-                       assert_eq!(result.err().unwrap().err, "Received reply_short_channel_ids_end with no information");
-               }
        }
 
        #[test]
index 34d4cd32fa18672903295ed54addaa1d173fbfa6..9f202557ff7066d485e59c0ce0a9710ae687e1be 100644 (file)
@@ -332,11 +332,9 @@ struct RouteGraphNode {
 impl cmp::Ord for RouteGraphNode {
        fn cmp(&self, other: &RouteGraphNode) -> cmp::Ordering {
                let other_score = cmp::max(other.lowest_fee_to_peer_through_node, other.path_htlc_minimum_msat)
-                       .checked_add(other.path_penalty_msat)
-                       .unwrap_or_else(|| u64::max_value());
+                       .saturating_add(other.path_penalty_msat);
                let self_score = cmp::max(self.lowest_fee_to_peer_through_node, self.path_htlc_minimum_msat)
-                       .checked_add(self.path_penalty_msat)
-                       .unwrap_or_else(|| u64::max_value());
+                       .saturating_add(self.path_penalty_msat);
                other_score.cmp(&self_score).then_with(|| other.node_id.cmp(&self.node_id))
        }
 }
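
The switch to saturating_add above is behaviourally equivalent to the checked_add/unwrap_or_else pattern it replaces; both clamp to u64::MAX on overflow:

        let base = u64::max_value() - 10;
        let penalty = 100u64;
        assert_eq!(base.checked_add(penalty).unwrap_or_else(|| u64::max_value()), u64::max_value());
        assert_eq!(base.saturating_add(penalty), u64::max_value());
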
@@ -495,6 +493,10 @@ impl<'a> PaymentPath<'a> {
                self.hops.last().unwrap().0.fee_msat
        }
 
+       fn get_path_penalty_msat(&self) -> u64 {
+               self.hops.first().map(|h| h.0.path_penalty_msat).unwrap_or(u64::max_value())
+       }
+
        fn get_total_fee_paid_msat(&self) -> u64 {
                if self.hops.len() < 1 {
                        return 0;
@@ -645,7 +647,7 @@ where L::Target: Logger {
 pub(crate) fn get_route<L: Deref, S: Score>(
        our_node_pubkey: &PublicKey, payment_params: &PaymentParameters, network_graph: &ReadOnlyNetworkGraph,
        first_hops: Option<&[&ChannelDetails]>, final_value_msat: u64, final_cltv_expiry_delta: u32,
-       logger: L, scorer: &S, _random_seed_bytes: &[u8; 32]
+       logger: L, scorer: &S, random_seed_bytes: &[u8; 32]
 ) -> Result<Route, LightningError>
 where L::Target: Logger {
        let payee_node_id = NodeId::from_pubkey(&payment_params.payee_pubkey);
@@ -670,6 +672,9 @@ where L::Target: Logger {
                        }
                }
        }
+       if payment_params.max_total_cltv_expiry_delta <= final_cltv_expiry_delta {
+               return Err(LightningError{err: "Can't find a route where the maximum total CLTV expiry delta is below the final CLTV expiry.".to_owned(), action: ErrorAction::IgnoreError});
+       }
 
        // The general routing idea is the following:
        // 1. Fill first/last hops communicated by the caller.
@@ -808,6 +813,29 @@ where L::Target: Logger {
        // - when we want to stop looking for new paths.
        let mut already_collected_value_msat = 0;
 
+       for (_, channels) in first_hop_targets.iter_mut() {
+               // Sort the first_hops channels to the same node(s) in priority order of which channel we'd
+               // most like to use.
+               //
+               // First, if channels are below `recommended_value_msat`, sort them in descending order,
+               // preferring larger channels to avoid splitting the payment into more MPP parts than is
+               // required.
+               //
+               // Second, because simply always sorting in descending order would always use our largest
+               // available outbound capacity, needlessly fragmenting our available channel capacities,
+               // sort channels above `recommended_value_msat` in ascending order, preferring channels
+               // which have enough, but not too much, capacity for the payment.
+               channels.sort_unstable_by(|chan_a, chan_b| {
+                       if chan_b.outbound_capacity_msat < recommended_value_msat || chan_a.outbound_capacity_msat < recommended_value_msat {
+                               // Sort in descending order
+                               chan_b.outbound_capacity_msat.cmp(&chan_a.outbound_capacity_msat)
+                       } else {
+                               // Sort in ascending order
+                               chan_a.outbound_capacity_msat.cmp(&chan_b.outbound_capacity_msat)
+                       }
+               });
+       }
+
        log_trace!(logger, "Building path from {} (payee) to {} (us/payer) for value {} msat.", payment_params.payee_pubkey, our_node_pubkey, final_value_msat);
 
        macro_rules! add_entry {
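
To illustrate the first-hop comparator added in the hunk above with hypothetical capacities: values at or above recommended_value_msat end up first, in ascending order, followed by below-threshold values in descending order.

        fn sort_first_hop_capacities(mut caps: Vec<u64>, recommended_value_msat: u64) -> Vec<u64> {
                caps.sort_unstable_by(|a, b| {
                        if *b < recommended_value_msat || *a < recommended_value_msat {
                                b.cmp(a) // descending
                        } else {
                                a.cmp(b) // ascending
                        }
                });
                caps
        }

        // sort_first_hop_capacities(vec![500_000, 100_000, 300_000, 800_000], 300_000)
        //   == vec![300_000, 500_000, 800_000, 100_000]
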
@@ -830,7 +858,7 @@ where L::Target: Logger {
                                        .entry(short_channel_id)
                                        .or_insert_with(|| $candidate.effective_capacity().as_msat());
 
-                               // It is tricky to substract $next_hops_fee_msat from available liquidity here.
+                               // It is tricky to subtract $next_hops_fee_msat from available liquidity here.
                                // It may be misleading because we might later choose to reduce the value transferred
                                // over these channels, and the channel which was insufficient might become sufficient.
                                // Worst case: we drop a good channel here because it can't cover the high following
@@ -866,12 +894,11 @@ where L::Target: Logger {
                                        // In order to already account for some of the privacy enhancing random CLTV
                                        // expiry delta offset we add on top later, we subtract a rough estimate
                                        // (2*MEDIAN_HOP_CLTV_EXPIRY_DELTA) here.
-                                       let max_total_cltv_expiry_delta = payment_params.max_total_cltv_expiry_delta
+                                       let max_total_cltv_expiry_delta = (payment_params.max_total_cltv_expiry_delta - final_cltv_expiry_delta)
                                                .checked_sub(2*MEDIAN_HOP_CLTV_EXPIRY_DELTA)
-                                               .unwrap_or(payment_params.max_total_cltv_expiry_delta);
+                                               .unwrap_or(payment_params.max_total_cltv_expiry_delta - final_cltv_expiry_delta);
                                        let hop_total_cltv_delta = ($next_hops_cltv_delta as u32)
-                                               .checked_add($candidate.cltv_expiry_delta())
-                                               .unwrap_or(u32::max_value());
+                                               .saturating_add($candidate.cltv_expiry_delta());
                                        let doesnt_exceed_cltv_delta_limit = hop_total_cltv_delta <= max_total_cltv_expiry_delta;
 
                                        let value_contribution_msat = cmp::min(available_value_contribution_msat, $next_hops_value_contribution);
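
Worked numbers for the CLTV budget above, with hypothetical values: if payment_params.max_total_cltv_expiry_delta is 1008, final_cltv_expiry_delta is 42 and MEDIAN_HOP_CLTV_EXPIRY_DELTA is 40, the intermediate hops of a candidate path may accumulate at most (1008 - 42) - 2*40 = 886 blocks of CLTV delta before doesnt_exceed_cltv_delta_limit becomes false; if the 2*MEDIAN_HOP_CLTV_EXPIRY_DELTA subtraction would underflow, the limit falls back to 1008 - 42 = 966.
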
@@ -978,9 +1005,9 @@ where L::Target: Logger {
                                                                }
                                                        }
 
-                                                       let path_penalty_msat = $next_hops_path_penalty_msat.checked_add(
-                                                               scorer.channel_penalty_msat(short_channel_id, amount_to_transfer_over_msat, *available_liquidity_msat,
-                                                                       &$src_node_id, &$dest_node_id)).unwrap_or_else(|| u64::max_value());
+                                                       let path_penalty_msat = $next_hops_path_penalty_msat.saturating_add(
+                                                               scorer.channel_penalty_msat(short_channel_id, amount_to_transfer_over_msat,
+                                                                       *available_liquidity_msat, &$src_node_id, &$dest_node_id));
                                                        let new_graph_node = RouteGraphNode {
                                                                node_id: $src_node_id,
                                                                lowest_fee_to_peer_through_node: total_fee_msat,
@@ -1008,11 +1035,9 @@ where L::Target: Logger {
                                                        // the fees included in $next_hops_path_htlc_minimum_msat, but also
                                                        // can't use something that may decrease on future hops.
                                                        let old_cost = cmp::max(old_entry.total_fee_msat, old_entry.path_htlc_minimum_msat)
-                                                               .checked_add(old_entry.path_penalty_msat)
-                                                               .unwrap_or_else(|| u64::max_value());
+                                                               .saturating_add(old_entry.path_penalty_msat);
                                                        let new_cost = cmp::max(total_fee_msat, path_htlc_minimum_msat)
-                                                               .checked_add(path_penalty_msat)
-                                                               .unwrap_or_else(|| u64::max_value());
+                                                               .saturating_add(path_penalty_msat);
 
                                                        if !old_entry.was_processed && new_cost < old_cost {
                                                                targets.push(new_graph_node);
@@ -1196,12 +1221,10 @@ where L::Target: Logger {
                                                .unwrap_or_else(|| CandidateRouteHop::PrivateHop { hint: hop });
                                        let capacity_msat = candidate.effective_capacity().as_msat();
                                        aggregate_next_hops_path_penalty_msat = aggregate_next_hops_path_penalty_msat
-                                               .checked_add(scorer.channel_penalty_msat(hop.short_channel_id, final_value_msat, capacity_msat, &source, &target))
-                                               .unwrap_or_else(|| u64::max_value());
+                                               .saturating_add(scorer.channel_penalty_msat(hop.short_channel_id, final_value_msat, capacity_msat, &source, &target));
 
                                        aggregate_next_hops_cltv_delta = aggregate_next_hops_cltv_delta
-                                               .checked_add(hop.cltv_expiry_delta as u32)
-                                               .unwrap_or_else(|| u32::max_value());
+                                               .saturating_add(hop.cltv_expiry_delta as u32);
 
                                        if !add_entry!(candidate, source, target, aggregate_next_hops_fee_msat, path_value_msat, aggregate_next_hops_path_htlc_minimum_msat, aggregate_next_hops_path_penalty_msat, aggregate_next_hops_cltv_delta) {
                                                // If this hop was not used then there is no use checking the preceding hops
@@ -1439,24 +1462,31 @@ where L::Target: Logger {
        }
 
        // Sort by total fees and take the best paths.
-       payment_paths.sort_by_key(|path| path.get_total_fee_paid_msat());
+       payment_paths.sort_unstable_by_key(|path| path.get_total_fee_paid_msat());
        if payment_paths.len() > 50 {
                payment_paths.truncate(50);
        }
 
        // Draw multiple sufficient routes by randomly combining the selected paths.
        let mut drawn_routes = Vec::new();
-       for i in 0..payment_paths.len() {
+       let mut prng = ChaCha20::new(random_seed_bytes, &[0u8; 12]);
+       let mut random_index_bytes = [0u8; ::core::mem::size_of::<usize>()];
+
+       let num_permutations = payment_paths.len();
+       for _ in 0..num_permutations {
                let mut cur_route = Vec::<PaymentPath>::new();
                let mut aggregate_route_value_msat = 0;
 
                // Step (6).
-               // TODO: real random shuffle
-               // Currently just starts with i_th and goes up to i-1_th in a looped way.
-               let cur_payment_paths = [&payment_paths[i..], &payment_paths[..i]].concat();
+               // Do a Fisher-Yates shuffle to create a random permutation of the payment paths
+               for cur_index in (1..payment_paths.len()).rev() {
+                       prng.process_in_place(&mut random_index_bytes);
+                       let random_index = usize::from_be_bytes(random_index_bytes).wrapping_rem(cur_index+1);
+                       payment_paths.swap(cur_index, random_index);
+               }
 
                // Step (7).
-               for payment_path in cur_payment_paths {
+               for payment_path in &payment_paths {
                        cur_route.push(payment_path.clone());
                        aggregate_route_value_msat += payment_path.get_value_msat();
                        if aggregate_route_value_msat > final_value_msat {
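
The shuffle introduced above is a standard Fisher-Yates pass driven by the ChaCha20 stream. A generic standalone sketch, with a hypothetical next_bytes closure standing in for prng.process_in_place and a 64-bit usize assumed:

        fn fisher_yates_shuffle<T, F: FnMut(&mut [u8; 8])>(items: &mut [T], mut next_bytes: F) {
                let mut buf = [0u8; 8];
                for cur_index in (1..items.len()).rev() {
                        next_bytes(&mut buf);
                        // Pick an index in [0, cur_index] and swap it into place.
                        let random_index = (u64::from_be_bytes(buf) as usize).wrapping_rem(cur_index + 1);
                        items.swap(cur_index, random_index);
                }
        }
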
@@ -1466,12 +1496,17 @@ where L::Target: Logger {
                                // also makes routing more reliable.
                                let mut overpaid_value_msat = aggregate_route_value_msat - final_value_msat;
 
-                               // First, drop some expensive low-value paths entirely if possible.
-                               // Sort by value so that we drop many really-low values first, since
-                               // fewer paths is better: the payment is less likely to fail.
-                               // TODO: this could also be optimized by also sorting by feerate_per_sat_routed,
-                               // so that the sender pays less fees overall. And also htlc_minimum_msat.
-                               cur_route.sort_by_key(|path| path.get_value_msat());
+                               // First, we drop some expensive low-value paths entirely if possible, since fewer
+                               // paths is better: the payment is less likely to fail. In order to do so, we sort
+                               // by value and fall back to total fees paid, i.e., in case of equal values we
+                               // prefer lower cost paths.
+                               cur_route.sort_unstable_by(|a, b| {
+                                       a.get_value_msat().cmp(&b.get_value_msat())
+                                               // Reverse ordering for fees, so we drop higher-fee paths first
+                                               .then_with(|| b.get_total_fee_paid_msat().saturating_add(b.get_path_penalty_msat())
+                                                       .cmp(&a.get_total_fee_paid_msat().saturating_add(a.get_path_penalty_msat())))
+                               });
+
                                // We should make sure that at least 1 path left.
                                let mut paths_left = cur_route.len();
                                cur_route.retain(|path| {
@@ -1495,13 +1530,14 @@ where L::Target: Logger {
                                assert!(cur_route.len() > 0);
 
                                // Step (8).
-                               // Now, substract the overpaid value from the most-expensive path.
+                               // Now, subtract the overpaid value from the most-expensive path.
                                // TODO: this could also be optimized by also sorting by feerate_per_sat_routed,
                                // so that the sender pays less fees overall. And also htlc_minimum_msat.
-                               cur_route.sort_by_key(|path| { path.hops.iter().map(|hop| hop.0.candidate.fees().proportional_millionths as u64).sum::<u64>() });
+                               cur_route.sort_unstable_by_key(|path| { path.hops.iter().map(|hop| hop.0.candidate.fees().proportional_millionths as u64).sum::<u64>() });
                                let expensive_payment_path = cur_route.first_mut().unwrap();
-                               // We already dropped all the small channels above, meaning all the
-                               // remaining channels are larger than remaining overpaid_value_msat.
+
+                               // We already dropped all the small value paths above, meaning all the
+                               // remaining paths are larger than remaining overpaid_value_msat.
                                // Thus, this can't be negative.
                                let expensive_path_new_value_msat = expensive_payment_path.get_value_msat() - overpaid_value_msat;
                                expensive_payment_path.update_value_and_recompute_fees(expensive_path_new_value_msat);
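
To make the trimming above concrete with hypothetical numbers: if final_value_msat is 100_000 and the two selected paths carry 60_000 and 70_000 msat, the route overpays by 30_000 msat. Neither path can be dropped (either one alone falls below 100_000), so whichever path the proportional-fee sort selects, say the 70_000 msat one, has its value reduced by the overpayment to 70_000 - 30_000 = 40_000 msat, leaving exactly 100_000 msat across the route.
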
@@ -1513,7 +1549,7 @@ where L::Target: Logger {
 
        // Step (9).
        // Select the best route by lowest total fee.
-       drawn_routes.sort_by_key(|paths| paths.iter().map(|path| path.get_total_fee_paid_msat()).sum::<u64>());
+       drawn_routes.sort_unstable_by_key(|paths| paths.iter().map(|path| path.get_total_fee_paid_msat()).sum::<u64>());
        let mut selected_paths = Vec::<Vec<Result<RouteHop, LightningError>>>::new();
        for payment_path in drawn_routes.first().unwrap() {
                let mut path = payment_path.hops.iter().map(|(payment_hop, node_features)| {
@@ -1561,45 +1597,58 @@ fn add_random_cltv_offset(route: &mut Route, payment_params: &PaymentParameters,
        for path in route.paths.iter_mut() {
                let mut shadow_ctlv_expiry_delta_offset: u32 = 0;
 
-               // Choose the last publicly known node as the starting point for the random walk
-               if let Some(starting_hop) = path.iter().rev().find(|h| network_nodes.contains_key(&NodeId::from_pubkey(&h.pubkey))) {
-                       let mut cur_node_id = NodeId::from_pubkey(&starting_hop.pubkey);
+               // Remember the last three nodes of the random walk and avoid looping back on them.
+               // Init with the last three nodes from the actual path, if possible.
+               let mut nodes_to_avoid: [NodeId; 3] = [NodeId::from_pubkey(&path.last().unwrap().pubkey),
+                       NodeId::from_pubkey(&path.get(path.len().saturating_sub(2)).unwrap().pubkey),
+                       NodeId::from_pubkey(&path.get(path.len().saturating_sub(3)).unwrap().pubkey)];
+
+               // Choose the last publicly known node as the starting point for the random walk.
+               let mut cur_hop: Option<NodeId> = None;
+               let mut path_nonce = [0u8; 12];
+               if let Some(starting_hop) = path.iter().rev()
+                       .find(|h| network_nodes.contains_key(&NodeId::from_pubkey(&h.pubkey))) {
+                               cur_hop = Some(NodeId::from_pubkey(&starting_hop.pubkey));
+                               path_nonce.copy_from_slice(&cur_hop.unwrap().as_slice()[..12]);
+               }
+
+               // Init PRNG with the path-dependent nonce, which is static for private paths.
+               let mut prng = ChaCha20::new(random_seed_bytes, &path_nonce);
+               let mut random_path_bytes = [0u8; ::core::mem::size_of::<usize>()];
 
-                       // Init PRNG with path nonce
-                       let mut path_nonce = [0u8; 12];
-                       path_nonce.copy_from_slice(&cur_node_id.as_slice()[..12]);
-                       let mut prng = ChaCha20::new(random_seed_bytes, &path_nonce);
-                       let mut random_path_bytes = [0u8; ::core::mem::size_of::<usize>()];
+               // Pick a random path length in [1 .. 3]
+               prng.process_in_place(&mut random_path_bytes);
+               let random_walk_length = usize::from_be_bytes(random_path_bytes).wrapping_rem(3).wrapping_add(1);
 
-                       // Pick a random path length in [1 .. 3]
-                       prng.process_in_place(&mut random_path_bytes);
-                       let random_walk_length = usize::from_be_bytes(random_path_bytes).wrapping_rem(3).wrapping_add(1);
+               for random_hop in 0..random_walk_length {
+                       // If we don't find a suitable offset in the public network graph, we default to
+                       // MEDIAN_HOP_CLTV_EXPIRY_DELTA.
+                       let mut random_hop_offset = MEDIAN_HOP_CLTV_EXPIRY_DELTA;
 
-                       for _random_hop in 0..random_walk_length {
+                       if let Some(cur_node_id) = cur_hop {
                                if let Some(cur_node) = network_nodes.get(&cur_node_id) {
-                                       // Randomly choose the next hop
+                                       // Randomly choose the next unvisited hop.
                                        prng.process_in_place(&mut random_path_bytes);
-                                       if let Some(random_channel) = usize::from_be_bytes(random_path_bytes).checked_rem(cur_node.channels.len())
+                                       if let Some(random_channel) = usize::from_be_bytes(random_path_bytes)
+                                               .checked_rem(cur_node.channels.len())
                                                .and_then(|index| cur_node.channels.get(index))
                                                .and_then(|id| network_channels.get(id)) {
                                                        random_channel.as_directed_from(&cur_node_id).map(|(dir_info, next_id)| {
-                                                               dir_info.direction().map(|channel_update_info|
-                                                                       shadow_ctlv_expiry_delta_offset = shadow_ctlv_expiry_delta_offset
-                                                                               .checked_add(channel_update_info.cltv_expiry_delta.into())
-                                                                               .unwrap_or(shadow_ctlv_expiry_delta_offset));
-                                                               cur_node_id = *next_id;
+                                                               if !nodes_to_avoid.iter().any(|x| x == next_id) {
+                                                                       nodes_to_avoid[random_hop] = *next_id;
+                                                                       dir_info.direction().map(|channel_update_info| {
+                                                                               random_hop_offset = channel_update_info.cltv_expiry_delta.into();
+                                                                               cur_hop = Some(*next_id);
+                                                                       });
+                                                               }
                                                        });
                                                }
                                }
                        }
-               } else {
-                       // If the entire path is private, choose a random offset from multiples of
-                       // MEDIAN_HOP_CLTV_EXPIRY_DELTA
-                       let mut prng = ChaCha20::new(random_seed_bytes, &[0u8; 8]);
-                       let mut random_bytes = [0u8; 4];
-                       prng.process_in_place(&mut random_bytes);
-                       let random_walk_length = u32::from_be_bytes(random_bytes).wrapping_rem(3).wrapping_add(1);
-                       shadow_ctlv_expiry_delta_offset = random_walk_length * MEDIAN_HOP_CLTV_EXPIRY_DELTA;
+
+                       shadow_ctlv_expiry_delta_offset = shadow_ctlv_expiry_delta_offset
+                               .checked_add(random_hop_offset)
+                               .unwrap_or(shadow_ctlv_expiry_delta_offset);
                }
 
                // Limit the total offset to reduce the worst-case locked liquidity timevalue
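
For intuition, the accumulation above adds one advertised `cltv_expiry_delta` per random hop (falling back to MEDIAN_HOP_CLTV_EXPIRY_DELTA when no usable public channel is found) before the total is capped. A minimal sketch of that accumulation, with made-up per-hop deltas:

    // Illustrative only: the real code pulls these deltas from the public
    // network graph and avoids revisiting nodes along the random walk.
    let hop_deltas: [u32; 3] = [40, 144, 72];
    let mut shadow_ctlv_expiry_delta_offset: u32 = 0;
    for delta in hop_deltas.iter() {
        shadow_ctlv_expiry_delta_offset = shadow_ctlv_expiry_delta_offset
            .checked_add(*delta)
            .unwrap_or(shadow_ctlv_expiry_delta_offset);
    }
    assert_eq!(shadow_ctlv_expiry_delta_offset, 256);
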
@@ -4892,6 +4941,32 @@ mod tests {
                        assert_eq!(route.paths[1][0].short_channel_id, 2);
                        assert_eq!(route.paths[1][0].fee_msat, 50_000);
                }
+
+               {
+                       // If we have a bunch of outbound channels to the same node, where most are not
+                       // sufficient to pay the full payment, but one is, we should default to just using the
+                       // single channel that has sufficient balance, avoiding MPP.
+                       //
+                       // If we have several options above the 3x-payment-value threshold, we should pick the
+                       // smallest of them, avoiding further fragmenting our available outbound balance to
+                       // this node.
+                       let route = get_route(&our_id, &payment_params, &network_graph.read_only(), Some(&[
+                               &get_channel_details(Some(2), nodes[0], InitFeatures::known(), 50_000),
+                               &get_channel_details(Some(3), nodes[0], InitFeatures::known(), 50_000),
+                               &get_channel_details(Some(5), nodes[0], InitFeatures::known(), 50_000),
+                               &get_channel_details(Some(6), nodes[0], InitFeatures::known(), 300_000),
+                               &get_channel_details(Some(7), nodes[0], InitFeatures::known(), 50_000),
+                               &get_channel_details(Some(8), nodes[0], InitFeatures::known(), 50_000),
+                               &get_channel_details(Some(9), nodes[0], InitFeatures::known(), 50_000),
+                               &get_channel_details(Some(4), nodes[0], InitFeatures::known(), 1_000_000),
+                       ]), 100_000, 42, Arc::clone(&logger), &scorer, &random_seed_bytes).unwrap();
+                       assert_eq!(route.paths.len(), 1);
+                       assert_eq!(route.paths[0].len(), 1);
+
+                       assert_eq!(route.paths[0][0].pubkey, nodes[0]);
+                       assert_eq!(route.paths[0][0].short_channel_id, 6);
+                       assert_eq!(route.paths[0][0].fee_msat, 100_000);
+               }
        }
 
        #[test]
@@ -5094,7 +5169,7 @@ mod tests {
                        .with_max_total_cltv_expiry_delta(feasible_max_total_cltv_delta);
                let keys_manager = test_utils::TestKeysInterface::new(&[0u8; 32], Network::Testnet);
                let random_seed_bytes = keys_manager.get_secure_random_bytes();
-               let route = get_route(&our_id, &feasible_payment_params, &network_graph, None, 100, 42, Arc::clone(&logger), &scorer, &random_seed_bytes).unwrap();
+               let route = get_route(&our_id, &feasible_payment_params, &network_graph, None, 100, 0, Arc::clone(&logger), &scorer, &random_seed_bytes).unwrap();
                let path = route.paths[0].iter().map(|hop| hop.short_channel_id).collect::<Vec<_>>();
                assert_ne!(path.len(), 0);
 
@@ -5102,7 +5177,7 @@ mod tests {
                let fail_max_total_cltv_delta = 23;
                let fail_payment_params = PaymentParameters::from_node_id(nodes[6]).with_route_hints(last_hops(&nodes))
                        .with_max_total_cltv_expiry_delta(fail_max_total_cltv_delta);
-               match get_route(&our_id, &fail_payment_params, &network_graph, None, 100, 42, Arc::clone(&logger), &scorer, &random_seed_bytes)
+               match get_route(&our_id, &fail_payment_params, &network_graph, None, 100, 0, Arc::clone(&logger), &scorer, &random_seed_bytes)
                {
                        Err(LightningError { err, .. } ) => {
                                assert_eq!(err, "Failed to find a path to the given destination");
@@ -5438,7 +5513,7 @@ mod benches {
                let mut routes = Vec::new();
                let mut route_endpoints = Vec::new();
                let mut seed: usize = 0xdeadbeef;
-               'load_endpoints: for _ in 0..100 {
+               'load_endpoints: for _ in 0..150 {
                        loop {
                                seed *= 0xdeadbeef;
                                let src = PublicKey::from_slice(nodes.keys().skip(seed % nodes.len()).next().unwrap().as_slice()).unwrap();
@@ -5470,6 +5545,15 @@ mod benches {
                        }
                }
 
+               // Because we've changed channel scores, it's possible we'll take different routes to the
+               // selected destinations, possibly causing us to fail because, e.g., the newly-selected path
+               // requires a too-high CLTV delta.
+               route_endpoints.retain(|(first_hop, params, amt)| {
+                       get_route(&payer, params, &graph.read_only(), Some(&[first_hop]), *amt, 42, &DummyLogger{}, &scorer, &random_seed_bytes).is_ok()
+               });
+               route_endpoints.truncate(100);
+               assert_eq!(route_endpoints.len(), 100);
+
                // ...then benchmark finding paths between the nodes we learned.
                let mut idx = 0;
                bench.iter(|| {
index c39abd1d0ff71d2f613935959007b333204371b2..459303f7d87f0c988293a2360798665570607955 100644 (file)
@@ -197,10 +197,6 @@ pub struct FixedPenaltyScorer {
        penalty_msat: u64,
 }
 
-impl_writeable_tlv_based!(FixedPenaltyScorer, {
-       (0, penalty_msat, required),
-});
-
 impl FixedPenaltyScorer {
        /// Creates a new scorer using `penalty_msat`.
        pub fn with_penalty(penalty_msat: u64) -> Self {
@@ -218,6 +214,22 @@ impl Score for FixedPenaltyScorer {
        fn payment_path_successful(&mut self, _path: &[&RouteHop]) {}
 }
 
+impl Writeable for FixedPenaltyScorer {
+       #[inline]
+       fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
+               write_tlv_fields!(w, {});
+               Ok(())
+       }
+}
+
+impl ReadableArgs<u64> for FixedPenaltyScorer {
+       #[inline]
+       fn read<R: Read>(r: &mut R, penalty_msat: u64) -> Result<Self, DecodeError> {
+               read_tlv_fields!(r, {});
+               Ok(Self { penalty_msat })
+       }
+}
+
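
A rough usage sketch of the new FixedPenaltyScorer serialization behavior: no fields are written, so the penalty must be supplied again when reading the scorer back (here assuming, as in the existing scorer tests, that a `Vec<u8>` and a byte slice satisfy the crate's `Writer`/`Read` bounds):

    // Minimal round-trip sketch; `1_000` is an arbitrary example penalty.
    let scorer = FixedPenaltyScorer::with_penalty(1_000);
    let mut bytes = Vec::new();
    scorer.write(&mut bytes).unwrap();
    // The stored form is empty TLV data, so the penalty is passed back in here.
    let restored = FixedPenaltyScorer::read(&mut &bytes[..], 1_000).unwrap();
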
 /// [`Score`] implementation that provides reasonable default behavior.
 ///
 /// Used to apply a fixed penalty to each channel, thus avoiding long paths when shorter paths with
@@ -504,19 +516,24 @@ pub struct ProbabilisticScorerUsingTime<G: Deref<Target = NetworkGraph>, T: Time
 }
 
 /// Parameters for configuring [`ProbabilisticScorer`].
+///
+/// Used to configure a base penalty and a liquidity penalty, the sum of which is the channel
+/// penalty (i.e., the amount in msats a payer is willing to pay to avoid routing through the
+/// channel).
 #[derive(Clone, Copy)]
 pub struct ProbabilisticScoringParameters {
-       /// A multiplier used to determine the amount in msats willing to be paid to avoid routing
-       /// through a channel, as per multiplying by the negative `log10` of the channel's success
-       /// probability for a payment.
+       /// A fixed penalty in msats to apply to each channel.
        ///
-       /// The success probability is determined by the effective channel capacity, the payment amount,
-       /// and knowledge learned from prior successful and unsuccessful payments. The lower bound of
-       /// the success probability is 0.01, effectively limiting the penalty to the range
-       /// `0..=2*liquidity_penalty_multiplier_msat`. The knowledge learned is decayed over time based
-       /// on [`liquidity_offset_half_life`].
+       /// Default value: 500 msat
+       pub base_penalty_msat: u64,
+
+       /// A multiplier used in conjunction with the negative `log10` of the channel's success
+       /// probability for a payment to determine the liquidity penalty.
+       ///
+       /// The penalty is based in part on the knowledge learned from prior successful and unsuccessful
+       /// payments. This knowledge is decayed over time based on [`liquidity_offset_half_life`]. The
+       /// penalty is effectively limited to `2 * liquidity_penalty_multiplier_msat`.
        ///
-       /// Default value: 10,000 msat
+       /// Default value: 40,000 msat
        ///
        /// [`liquidity_offset_half_life`]: Self::liquidity_offset_half_life
        pub liquidity_penalty_multiplier_msat: u64,
@@ -537,11 +554,6 @@ pub struct ProbabilisticScoringParameters {
        pub liquidity_offset_half_life: Duration,
 }
 
-impl_writeable_tlv_based!(ProbabilisticScoringParameters, {
-       (0, liquidity_penalty_multiplier_msat, required),
-       (2, liquidity_offset_half_life, required),
-});
-
 /// Accounting for channel liquidity balance uncertainty.
 ///
 /// Direction is defined in terms of [`NodeId`] partial ordering, where the source node is the
@@ -590,7 +602,8 @@ impl<G: Deref<Target = NetworkGraph>, T: Time> ProbabilisticScorerUsingTime<G, T
 impl Default for ProbabilisticScoringParameters {
        fn default() -> Self {
                Self {
-                       liquidity_penalty_multiplier_msat: 10_000,
+                       base_penalty_msat: 500,
+                       liquidity_penalty_multiplier_msat: 40_000,
                        liquidity_offset_half_life: Duration::from_secs(3600),
                }
        }
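
With these defaults in place, a minimal configuration sketch (using struct-update syntax, as the tests below do) might keep the 40,000 msat liquidity multiplier while dropping the base penalty; `network_graph` here stands in for an existing `NetworkGraph` reference:

    let params = ProbabilisticScoringParameters {
        base_penalty_msat: 0,
        ..ProbabilisticScoringParameters::default()
    };
    let scorer = ProbabilisticScorer::new(params, &network_graph);
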
@@ -653,20 +666,21 @@ impl<L: Deref<Target = u64>, T: Time, U: Deref<Target = T>> DirectedChannelLiqui
        /// Returns a penalty for routing the given HTLC `amount_msat` through the channel in this
        /// direction.
        fn penalty_msat(&self, amount_msat: u64, liquidity_penalty_multiplier_msat: u64) -> u64 {
+               let max_penalty_msat = liquidity_penalty_multiplier_msat.saturating_mul(2);
                let max_liquidity_msat = self.max_liquidity_msat();
                let min_liquidity_msat = core::cmp::min(self.min_liquidity_msat(), max_liquidity_msat);
                if amount_msat > max_liquidity_msat {
-                       u64::max_value()
+                       max_penalty_msat
                } else if amount_msat <= min_liquidity_msat {
                        0
                } else {
-                       let numerator = max_liquidity_msat + 1 - amount_msat;
-                       let denominator = max_liquidity_msat + 1 - min_liquidity_msat;
-                       approx::negative_log10_times_1024(numerator, denominator)
-                               .saturating_mul(liquidity_penalty_multiplier_msat) / 1024
+                       let numerator = (max_liquidity_msat - amount_msat).saturating_add(1);
+                       let denominator = (max_liquidity_msat - min_liquidity_msat).saturating_add(1);
+                       let penalty_msat = approx::negative_log10_times_1024(numerator, denominator)
+                               .saturating_mul(liquidity_penalty_multiplier_msat) / 1024;
+                       // Upper bound the penalty to ensure some channel is selected.
+                       penalty_msat.min(max_penalty_msat)
                }
-               // Upper bound the penalty to ensure some channel is selected.
-               .min(2 * liquidity_penalty_multiplier_msat)
        }
 
        /// Returns the lower bound of the channel liquidity balance in this direction.
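
A worked instance of the penalty above, assuming no prior knowledge of the channel (so min_liquidity_msat is 0 and max_liquidity_msat equals the capacity):

    // capacity = 1_024 msat, amount = 128 msat, multiplier = 1_000 msat:
    //   numerator   = 1_024 - 128 + 1 = 897
    //   denominator = 1_024 -   0 + 1 = 1_025
    //   penalty    ~= -log10(897 / 1_025) * 1_000 ~= 58 msat
    // which matches the `adds_base_penalty_to_liquidity_penalty` test below and
    // sits well under the 2 * 1_000 msat cap.
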
@@ -745,6 +759,7 @@ impl<G: Deref<Target = NetworkGraph>, T: Time> Score for ProbabilisticScorerUsin
                        .unwrap_or(&ChannelLiquidity::new())
                        .as_directed(source, target, capacity_msat, liquidity_offset_half_life)
                        .penalty_msat(amount_msat, liquidity_penalty_multiplier_msat)
+                       .saturating_add(self.params.base_penalty_msat)
        }
 
        fn payment_path_failed(&mut self, path: &[&RouteHop], short_channel_id: u64) {
@@ -927,11 +942,8 @@ impl<G: Deref<Target = NetworkGraph>, T: Time> Writeable for ProbabilisticScorer
        }
 }
 
-impl<G, T> ReadableArgs<(ProbabilisticScoringParameters, G)> for ProbabilisticScorerUsingTime<G, T>
-where
-       G: Deref<Target = NetworkGraph>,
-       T: Time,
-{
+impl<G: Deref<Target = NetworkGraph>, T: Time>
+ReadableArgs<(ProbabilisticScoringParameters, G)> for ProbabilisticScorerUsingTime<G, T> {
        #[inline]
        fn read<R: Read>(
                r: &mut R, args: (ProbabilisticScoringParameters, G)
@@ -1725,7 +1737,7 @@ mod tests {
        fn increased_penalty_nearing_liquidity_upper_bound() {
                let network_graph = network_graph();
                let params = ProbabilisticScoringParameters {
-                       liquidity_penalty_multiplier_msat: 1_000, ..Default::default()
+                       base_penalty_msat: 0, liquidity_penalty_multiplier_msat: 1_000, ..Default::default()
                };
                let scorer = ProbabilisticScorer::new(params, &network_graph);
                let source = source_node_id();
@@ -1750,7 +1762,7 @@ mod tests {
                let last_updated = SinceEpoch::now();
                let network_graph = network_graph();
                let params = ProbabilisticScoringParameters {
-                       liquidity_penalty_multiplier_msat: 1_000, ..Default::default()
+                       base_penalty_msat: 0, liquidity_penalty_multiplier_msat: 1_000, ..Default::default()
                };
                let scorer = ProbabilisticScorer::new(params, &network_graph)
                        .with_channel(42,
@@ -1770,7 +1782,7 @@ mod tests {
        fn does_not_further_penalize_own_channel() {
                let network_graph = network_graph();
                let params = ProbabilisticScoringParameters {
-                       liquidity_penalty_multiplier_msat: 1_000, ..Default::default()
+                       base_penalty_msat: 0, liquidity_penalty_multiplier_msat: 1_000, ..Default::default()
                };
                let mut scorer = ProbabilisticScorer::new(params, &network_graph);
                let sender = sender_node_id();
@@ -1791,7 +1803,7 @@ mod tests {
        fn sets_liquidity_lower_bound_on_downstream_failure() {
                let network_graph = network_graph();
                let params = ProbabilisticScoringParameters {
-                       liquidity_penalty_multiplier_msat: 1_000, ..Default::default()
+                       base_penalty_msat: 0, liquidity_penalty_multiplier_msat: 1_000, ..Default::default()
                };
                let mut scorer = ProbabilisticScorer::new(params, &network_graph);
                let source = source_node_id();
@@ -1813,7 +1825,7 @@ mod tests {
        fn sets_liquidity_upper_bound_on_failure() {
                let network_graph = network_graph();
                let params = ProbabilisticScoringParameters {
-                       liquidity_penalty_multiplier_msat: 1_000, ..Default::default()
+                       base_penalty_msat: 0, liquidity_penalty_multiplier_msat: 1_000, ..Default::default()
                };
                let mut scorer = ProbabilisticScorer::new(params, &network_graph);
                let source = source_node_id();
@@ -1835,7 +1847,7 @@ mod tests {
        fn reduces_liquidity_upper_bound_along_path_on_success() {
                let network_graph = network_graph();
                let params = ProbabilisticScoringParameters {
-                       liquidity_penalty_multiplier_msat: 1_000, ..Default::default()
+                       base_penalty_msat: 0, liquidity_penalty_multiplier_msat: 1_000, ..Default::default()
                };
                let mut scorer = ProbabilisticScorer::new(params, &network_graph);
                let sender = sender_node_id();
@@ -1859,6 +1871,7 @@ mod tests {
        fn decays_liquidity_bounds_over_time() {
                let network_graph = network_graph();
                let params = ProbabilisticScoringParameters {
+                       base_penalty_msat: 0,
                        liquidity_penalty_multiplier_msat: 1_000,
                        liquidity_offset_half_life: Duration::from_secs(10),
                };
@@ -1910,6 +1923,7 @@ mod tests {
        fn decays_liquidity_bounds_without_shift_overflow() {
                let network_graph = network_graph();
                let params = ProbabilisticScoringParameters {
+                       base_penalty_msat: 0,
                        liquidity_penalty_multiplier_msat: 1_000,
                        liquidity_offset_half_life: Duration::from_secs(10),
                };
@@ -1934,6 +1948,7 @@ mod tests {
        fn restricts_liquidity_bounds_after_decay() {
                let network_graph = network_graph();
                let params = ProbabilisticScoringParameters {
+                       base_penalty_msat: 0,
                        liquidity_penalty_multiplier_msat: 1_000,
                        liquidity_offset_half_life: Duration::from_secs(10),
                };
@@ -1971,6 +1986,7 @@ mod tests {
        fn restores_persisted_liquidity_bounds() {
                let network_graph = network_graph();
                let params = ProbabilisticScoringParameters {
+                       base_penalty_msat: 0,
                        liquidity_penalty_multiplier_msat: 1_000,
                        liquidity_offset_half_life: Duration::from_secs(10),
                };
@@ -2000,6 +2016,7 @@ mod tests {
        fn decays_persisted_liquidity_bounds() {
                let network_graph = network_graph();
                let params = ProbabilisticScoringParameters {
+                       base_penalty_msat: 0,
                        liquidity_penalty_multiplier_msat: 1_000,
                        liquidity_offset_half_life: Duration::from_secs(10),
                };
@@ -2026,4 +2043,39 @@ mod tests {
                SinceEpoch::advance(Duration::from_secs(10));
                assert_eq!(deserialized_scorer.channel_penalty_msat(42, 500, 1_000, &source, &target), 371);
        }
+
+       #[test]
+       fn adds_base_penalty_to_liquidity_penalty() {
+               let network_graph = network_graph();
+               let source = source_node_id();
+               let target = target_node_id();
+
+               let params = ProbabilisticScoringParameters {
+                       base_penalty_msat: 0, liquidity_penalty_multiplier_msat: 1_000, ..Default::default()
+               };
+               let scorer = ProbabilisticScorer::new(params, &network_graph);
+               assert_eq!(scorer.channel_penalty_msat(42, 128, 1_024, &source, &target), 58);
+
+               let params = ProbabilisticScoringParameters {
+                       base_penalty_msat: 500, liquidity_penalty_multiplier_msat: 1_000, ..Default::default()
+               };
+               let scorer = ProbabilisticScorer::new(params, &network_graph);
+               assert_eq!(scorer.channel_penalty_msat(42, 128, 1_024, &source, &target), 558);
+       }
+
+       #[test]
+       fn calculates_log10_without_overflowing_u64_max_value() {
+               let network_graph = network_graph();
+               let source = source_node_id();
+               let target = target_node_id();
+
+               let params = ProbabilisticScoringParameters {
+                       base_penalty_msat: 0, ..Default::default()
+               };
+               let scorer = ProbabilisticScorer::new(params, &network_graph);
+               assert_eq!(
+                       scorer.channel_penalty_msat(42, u64::max_value(), u64::max_value(), &source, &target),
+                       80_000,
+               );
+       }
 }
index f8a3f847d4348e5cbe13033fef25645125cdf6d4..300ddacb020566dac6fc77ac537077918b76e925 100644 (file)
@@ -1,6 +1,7 @@
 use bitcoin::hashes::{Hash, HashEngine};
 use bitcoin::hashes::hmac::{Hmac, HmacEngine};
 use bitcoin::hashes::sha256::Hash as Sha256;
+use bitcoin::secp256k1::{Message, Secp256k1, SecretKey, Signature, Signing};
 
 macro_rules! hkdf_extract_expand {
        ($salt: expr, $ikm: expr) => {{
@@ -36,3 +37,12 @@ pub fn hkdf_extract_expand_twice(salt: &[u8], ikm: &[u8]) -> ([u8; 32], [u8; 32]
 pub fn hkdf_extract_expand_thrice(salt: &[u8], ikm: &[u8]) -> ([u8; 32], [u8; 32], [u8; 32]) {
        hkdf_extract_expand!(salt, ikm, 3)
 }
+
+#[inline]
+pub fn sign<C: Signing>(ctx: &Secp256k1<C>, msg: &Message, sk: &SecretKey) -> Signature {
+       #[cfg(feature = "grind_signatures")]
+       let sig = ctx.sign_low_r(msg, sk);
+       #[cfg(not(feature = "grind_signatures"))]
+       let sig = ctx.sign(msg, sk);
+       sig
+}
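
A minimal call-site sketch for the helper above; the key bytes and hashed message are placeholders rather than values used anywhere in LDK:

    use bitcoin::hashes::Hash;
    use bitcoin::hashes::sha256::Hash as Sha256;
    use bitcoin::secp256k1::{Message, Secp256k1, SecretKey};

    let secp = Secp256k1::signing_only();
    let sk = SecretKey::from_slice(&[0x42u8; 32]).expect("32 bytes within curve order");
    let digest = Sha256::hash(b"placeholder message");
    let msg = Message::from_slice(&digest[..]).expect("32-byte digest");
    // Grinds for a low-R signature only when built with `grind_signatures`.
    let sig = sign(&secp, &msg, &sk);
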
index 8b5a04370cfdcef0429c6a89f972bd9d7e0011a8..ea50398b5e54ad2d2d1c691dadd31b0f3fe341fb 100644 (file)
@@ -366,11 +366,15 @@ pub enum Event {
                /// The channel_id of the channel which has been closed. Note that on-chain transactions
                /// resolving the channel are likely still awaiting confirmation.
                channel_id: [u8; 32],
-               /// The `user_channel_id` value passed in to [`ChannelManager::create_channel`], or 0 for
-               /// an inbound channel. This will always be zero for objects serialized with LDK versions
-               /// prior to 0.0.102.
+               /// The `user_channel_id` value passed in to [`ChannelManager::create_channel`] for outbound
+               /// channels, or to [`ChannelManager::accept_inbound_channel`] for inbound channels if the
+               /// [`UserConfig::manually_accept_inbound_channels`] config flag is set to true. Otherwise,
+               /// `user_channel_id` will be 0 for an inbound channel.
+               /// This will always be zero for objects serialized with LDK versions prior to 0.0.102.
                ///
                /// [`ChannelManager::create_channel`]: crate::ln::channelmanager::ChannelManager::create_channel
+               /// [`ChannelManager::accept_inbound_channel`]: crate::ln::channelmanager::ChannelManager::accept_inbound_channel
+               /// [`UserConfig::manually_accept_inbound_channels`]: crate::util::config::UserConfig::manually_accept_inbound_channels
                user_channel_id: u64,
                /// The reason the channel was closed.
                reason: ClosureReason
@@ -920,7 +924,15 @@ pub enum MessageSendEvent {
                node_id: PublicKey,
                /// The reply_channel_range which should be sent.
                msg: msgs::ReplyChannelRange,
-       }
+       },
+       /// Sends a timestamp filter for inbound gossip. This should be sent on each new connection to
+       /// enable receiving gossip messages from the peer.
+       SendGossipTimestampFilter {
+               /// The node_id of this message recipient
+               node_id: PublicKey,
+               /// The gossip_timestamp_filter which should be sent.
+               msg: msgs::GossipTimestampFilter,
+       },
 }
 
 /// A trait indicating an object may generate message send events
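
A rough sketch of how an event consumer might forward the new variant; `enqueue_message` is a hypothetical send helper, and the fields of `msgs::GossipTimestampFilter` are assumed to mirror BOLT 7 (chain_hash, first_timestamp, timestamp_range):

    // Hypothetical arm in a MessageSendEvent processing loop.
    match event {
        MessageSendEvent::SendGossipTimestampFilter { ref node_id, ref msg } => {
            enqueue_message(node_id, msg);
        },
        _ => {},
    }
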
index 27c2d9de874ed45abca03dabe171ca238d7288ff..3c36cdf066a5048bbb1b6fe112c2906ac824e6cc 100644 (file)
@@ -384,7 +384,7 @@ impl msgs::RoutingMessageHandler for TestRoutingMessageHandler {
                Vec::new()
        }
 
-       fn sync_routing_table(&self, _their_node_id: &PublicKey, _init_msg: &msgs::Init) {}
+       fn peer_connected(&self, _their_node_id: &PublicKey, _init_msg: &msgs::Init) {}
 
        fn handle_reply_channel_range(&self, _their_node_id: &PublicKey, _msg: msgs::ReplyChannelRange) -> Result<(), msgs::LightningError> {
                Ok(())