Merge pull request #2812 from valentinewallace/2023-12-blinded-forwarding
author Matt Corallo <649246+TheBlueMatt@users.noreply.github.com>
Wed, 17 Jan 2024 00:28:30 +0000 (00:28 +0000)
committer GitHub <noreply@github.com>
Wed, 17 Jan 2024 00:28:30 +0000 (00:28 +0000)
Complete route blinding support

CONTRIBUTING.md
lightning-block-sync/src/gossip.rs
lightning-invoice/src/ser.rs
lightning/src/events/mod.rs
lightning/src/ln/channel.rs
lightning/src/ln/channelmanager.rs
lightning/src/ln/functional_test_utils.rs
lightning/src/ln/functional_tests.rs
lightning/src/ln/peer_handler.rs
lightning/src/routing/router.rs
lightning/src/util/wakers.rs

index 78c515007d03a75ba2b0261d25ebc74f46531244..b57217165e6b79c239351ebeef85647bafdab18a 100644 (file)
@@ -148,7 +148,7 @@ Security
 --------
 
 Security is the primary focus of `rust-lightning`; disclosure of security
-vulnerabilites helps prevent user loss of funds. If you believe a vulnerability
+vulnerabilities helps prevent user loss of funds. If you believe a vulnerability
 may affect other Lightning implementations, please inform them.
 
 You can find further information on submitting (possible) vulnerabilities in the
index 591b0298793ffb3baadb1d0ee0009b1199a95941..9cd4049679c30fd5d7ddd2f86562d6df81758447 100644 (file)
@@ -132,14 +132,12 @@ impl<
 pub struct GossipVerifier<S: FutureSpawner,
        Blocks: Deref + Send + Sync + 'static + Clone,
        L: Deref + Send + Sync + 'static,
-       APM: Deref + Send + Sync + 'static + Clone,
 > where
        Blocks::Target: UtxoSource,
        L::Target: Logger,
-       APM::Target: APeerManager,
 {
        source: Blocks,
-       peer_manager: APM,
+       peer_manager_wake: Arc<dyn Fn() + Send + Sync>,
        gossiper: Arc<P2PGossipSync<Arc<NetworkGraph<L>>, Self, L>>,
        spawn: S,
        block_cache: Arc<Mutex<VecDeque<(u32, Block)>>>,
@@ -150,19 +148,20 @@ const BLOCK_CACHE_SIZE: usize = 5;
 impl<S: FutureSpawner,
        Blocks: Deref + Send + Sync + Clone,
        L: Deref + Send + Sync,
-       APM: Deref + Send + Sync + Clone,
-> GossipVerifier<S, Blocks, L, APM> where
+> GossipVerifier<S, Blocks, L> where
        Blocks::Target: UtxoSource,
        L::Target: Logger,
-       APM::Target: APeerManager,
 {
        /// Constructs a new [`GossipVerifier`].
        ///
        /// This is expected to be given to a [`P2PGossipSync`] (initially constructed with `None` for
        /// the UTXO lookup) via [`P2PGossipSync::add_utxo_lookup`].
-       pub fn new(source: Blocks, spawn: S, gossiper: Arc<P2PGossipSync<Arc<NetworkGraph<L>>, Self, L>>, peer_manager: APM) -> Self {
+       pub fn new<APM: Deref + Send + Sync + Clone + 'static>(
+               source: Blocks, spawn: S, gossiper: Arc<P2PGossipSync<Arc<NetworkGraph<L>>, Self, L>>, peer_manager: APM
+       ) -> Self where APM::Target: APeerManager {
+               let peer_manager_wake = Arc::new(move || peer_manager.as_ref().process_events());
                Self {
-                       source, spawn, gossiper, peer_manager,
+                       source, spawn, gossiper, peer_manager_wake,
                        block_cache: Arc::new(Mutex::new(VecDeque::with_capacity(BLOCK_CACHE_SIZE))),
                }
        }
@@ -252,11 +251,9 @@ impl<S: FutureSpawner,
 impl<S: FutureSpawner,
        Blocks: Deref + Send + Sync + Clone,
        L: Deref + Send + Sync,
-       APM: Deref + Send + Sync + Clone,
-> Deref for GossipVerifier<S, Blocks, L, APM> where
+> Deref for GossipVerifier<S, Blocks, L> where
        Blocks::Target: UtxoSource,
        L::Target: Logger,
-       APM::Target: APeerManager,
 {
        type Target = Self;
        fn deref(&self) -> &Self { self }
@@ -266,11 +263,9 @@ impl<S: FutureSpawner,
 impl<S: FutureSpawner,
        Blocks: Deref + Send + Sync + Clone,
        L: Deref + Send + Sync,
-       APM: Deref + Send + Sync + Clone,
-> UtxoLookup for GossipVerifier<S, Blocks, L, APM> where
+> UtxoLookup for GossipVerifier<S, Blocks, L> where
        Blocks::Target: UtxoSource,
        L::Target: Logger,
-       APM::Target: APeerManager,
 {
        fn get_utxo(&self, _chain_hash: &ChainHash, short_channel_id: u64) -> UtxoResult {
                let res = UtxoFuture::new();
@@ -278,11 +273,11 @@ impl<S: FutureSpawner,
                let source = self.source.clone();
                let gossiper = Arc::clone(&self.gossiper);
                let block_cache = Arc::clone(&self.block_cache);
-               let pm = self.peer_manager.clone();
+               let pmw = Arc::clone(&self.peer_manager_wake);
                self.spawn.spawn(async move {
                        let res = Self::retrieve_utxo(source, block_cache, short_channel_id).await;
                        fut.resolve(gossiper.network_graph(), &*gossiper, res);
-                       pm.as_ref().process_events();
+                       (pmw)();
                });
                UtxoResult::Async(res)
        }
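
The hunks above drop the `APM` type parameter from `GossipVerifier` by erasing the peer-manager type behind an `Arc<dyn Fn() + Send + Sync>` captured once in `new`; only the ability to wake the peer manager survives in the struct. A minimal standalone sketch of that type-erasure pattern (all names here are illustrative, not LDK API):

    use std::sync::Arc;

    // The generic handle is captured once in a closure, so the owning struct
    // no longer needs a type parameter for it.
    struct Verifier {
        peer_manager_wake: Arc<dyn Fn() + Send + Sync>,
    }

    impl Verifier {
        fn new<PM: Send + Sync + 'static>(peer_manager: PM, process_events: fn(&PM)) -> Self {
            let peer_manager_wake: Arc<dyn Fn() + Send + Sync> =
                Arc::new(move || process_events(&peer_manager));
            Self { peer_manager_wake }
        }

        fn after_async_lookup(&self) {
            // The Arc clone can be moved into a spawned task, as in the diff.
            let pmw = Arc::clone(&self.peer_manager_wake);
            (pmw)();
        }
    }

The payoff is visible in the `Deref` and `UtxoLookup` impls below the constructor: every bound on `APM` disappears from the struct's public signature.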
index fe42f72b6532f593083049832a07f31272cd43a5..dc63783bfa388fc35f7394d8cc243471683df99e 100644 (file)
@@ -52,7 +52,7 @@ impl<'a, W: WriteBase32> BytesToBase32<'a, W> {
                }
 
                // Combine all bits from buffer with enough bits from this rounds byte so that they fill
-               // a u5. Save reamining bits from byte to buffer.
+               // a u5. Save remaining bits from byte to buffer.
                let from_buffer = self.buffer >> 3;
                let from_byte = byte >> (3 + self.buffer_bits); // buffer_bits <= 4
 
index dede02a37cb9c5e6349dfb27e7504cc3b1fb83fc..77f6937c54558aae85d10a3c1c8cc9c05bc5d424 100644 (file)
@@ -24,6 +24,7 @@ use crate::ln::channel::FUNDING_CONF_DEADLINE_BLOCKS;
 use crate::ln::features::ChannelTypeFeatures;
 use crate::ln::msgs;
 use crate::ln::{ChannelId, PaymentPreimage, PaymentHash, PaymentSecret};
+use crate::chain::transaction;
 use crate::routing::gossip::NetworkUpdate;
 use crate::util::errors::APIError;
 use crate::util::ser::{BigSize, FixedLengthReader, Writeable, Writer, MaybeReadable, Readable, RequiredWrapper, UpgradableRequired, WithoutLength};
@@ -861,7 +862,7 @@ pub enum Event {
        ///
        /// [`ChannelManager::accept_inbound_channel`]: crate::ln::channelmanager::ChannelManager::accept_inbound_channel
        /// [`UserConfig::manually_accept_inbound_channels`]: crate::util::config::UserConfig::manually_accept_inbound_channels
-       ChannelClosed  {
+       ChannelClosed {
                /// The `channel_id` of the channel which has been closed. Note that on-chain transactions
                /// resolving the channel are likely still awaiting confirmation.
                channel_id: ChannelId,
@@ -886,6 +887,10 @@ pub enum Event {
                ///
                /// This field will be `None` for objects serialized prior to LDK 0.0.117.
                channel_capacity_sats: Option<u64>,
+               /// The original channel funding TXO; this helps checking for the existence and confirmation
+               /// status of the closing tx.
+               /// Note that for instances serialized in v0.0.119 or prior this will be missing (None).
+               channel_funding_txo: Option<transaction::OutPoint>,
        },
        /// Used to indicate to the user that they can abandon the funding transaction and recycle the
        /// inputs for another purpose.
@@ -1091,7 +1096,7 @@ impl Writeable for Event {
                                });
                        },
                        &Event::ChannelClosed { ref channel_id, ref user_channel_id, ref reason,
-                               ref counterparty_node_id, ref channel_capacity_sats
+                               ref counterparty_node_id, ref channel_capacity_sats, ref channel_funding_txo
                        } => {
                                9u8.write(writer)?;
                                // `user_channel_id` used to be a single u64 value. In order to remain backwards
@@ -1106,6 +1111,7 @@ impl Writeable for Event {
                                        (3, user_channel_id_high, required),
                                        (5, counterparty_node_id, option),
                                        (7, channel_capacity_sats, option),
+                                       (9, channel_funding_txo, option),
                                });
                        },
                        &Event::DiscardFunding { ref channel_id, ref transaction } => {
@@ -1405,6 +1411,7 @@ impl MaybeReadable for Event {
                                        let mut user_channel_id_high_opt: Option<u64> = None;
                                        let mut counterparty_node_id = None;
                                        let mut channel_capacity_sats = None;
+                                       let mut channel_funding_txo = None;
                                        read_tlv_fields!(reader, {
                                                (0, channel_id, required),
                                                (1, user_channel_id_low_opt, option),
@@ -1412,6 +1419,7 @@ impl MaybeReadable for Event {
                                                (3, user_channel_id_high_opt, option),
                                                (5, counterparty_node_id, option),
                                                (7, channel_capacity_sats, option),
+                                               (9, channel_funding_txo, option),
                                        });
 
                                        // `user_channel_id` used to be a single u64 value. In order to remain
@@ -1421,7 +1429,7 @@ impl MaybeReadable for Event {
                                                ((user_channel_id_high_opt.unwrap_or(0) as u128) << 64);
 
                                        Ok(Some(Event::ChannelClosed { channel_id, user_channel_id, reason: _init_tlv_based_struct_field!(reason, upgradable_required),
-                                               counterparty_node_id, channel_capacity_sats }))
+                                               counterparty_node_id, channel_capacity_sats, channel_funding_txo }))
                                };
                                f()
                        },
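
The new `channel_funding_txo` rides TLV type 9: odd-numbered, so readers that predate it skip it rather than failing, and read as `option`, so events serialized before the field existed deserialize as `None`. A hypothetical record showing the same even/odd TLV convention with LDK's exported macro (`ClosureRecord` and its fields are invented for illustration):

    use lightning::impl_writeable_tlv_based;

    struct ClosureRecord {
        capacity_sats: Option<u64>,     // type 7, present since the record existed
        funding_txo_index: Option<u16>, // type 9, added later; old data reads as None
    }

    impl_writeable_tlv_based!(ClosureRecord, {
        (7, capacity_sats, option),
        (9, funding_txo_index, option),
    });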
index b3657701a4a4b876c80694bd2496e3662a78937a..ec4f26664a76b053bf7313695e686bdfe7b37c22 100644 (file)
@@ -827,6 +827,7 @@ pub(crate) struct ShutdownResult {
        pub(crate) channel_capacity_satoshis: u64,
        pub(crate) counterparty_node_id: PublicKey,
        pub(crate) unbroadcasted_funding_tx: Option<Transaction>,
+       pub(crate) channel_funding_txo: Option<OutPoint>,
 }
 
 /// If the majority of the channels funds are to the fundee and the initiator holds only just
@@ -2415,6 +2416,7 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
                        channel_capacity_satoshis: self.channel_value_satoshis,
                        counterparty_node_id: self.counterparty_node_id,
                        unbroadcasted_funding_tx,
+                       channel_funding_txo: self.get_funding_txo(),
                }
        }
 
@@ -4943,6 +4945,7 @@ impl<SP: Deref> Channel<SP> where
                                        channel_capacity_satoshis: self.context.channel_value_satoshis,
                                        counterparty_node_id: self.context.counterparty_node_id,
                                        unbroadcasted_funding_tx: self.context.unbroadcasted_funding(),
+                                       channel_funding_txo: self.context.get_funding_txo(),
                                };
                                let tx = self.build_signed_closing_transaction(&mut closing_tx, &msg.signature, &sig);
                                self.context.channel_state = ChannelState::ShutdownComplete;
@@ -4977,6 +4980,7 @@ impl<SP: Deref> Channel<SP> where
                                                                channel_capacity_satoshis: self.context.channel_value_satoshis,
                                                                counterparty_node_id: self.context.counterparty_node_id,
                                                                unbroadcasted_funding_tx: self.context.unbroadcasted_funding(),
+                                                               channel_funding_txo: self.context.get_funding_txo(),
                                                        };
                                                        self.context.channel_state = ChannelState::ShutdownComplete;
                                                        self.context.update_time_counter += 1;
index 34cce867e61d63d21545be7bfbbb61be8b2f3b3b..87197dd45493ff6ba689179cbe721cd905d834ea 100644 (file)
@@ -2890,6 +2890,7 @@ where
                                reason: shutdown_res.closure_reason,
                                counterparty_node_id: Some(shutdown_res.counterparty_node_id),
                                channel_capacity_sats: Some(shutdown_res.channel_capacity_satoshis),
+                               channel_funding_txo: shutdown_res.channel_funding_txo,
                        }, None));
 
                        if let Some(transaction) = shutdown_res.unbroadcasted_funding_tx {
@@ -10336,6 +10337,7 @@ where
                                                reason: ClosureReason::OutdatedChannelManager,
                                                counterparty_node_id: Some(channel.context.get_counterparty_node_id()),
                                                channel_capacity_sats: Some(channel.context.get_value_satoshis()),
+                                               channel_funding_txo: channel.context.get_funding_txo(),
                                        }, None));
                                        for (channel_htlc_source, payment_hash) in channel.inflight_htlc_sources() {
                                                let mut found_htlc = false;
@@ -10389,6 +10391,7 @@ where
                                        reason: ClosureReason::DisconnectedPeer,
                                        counterparty_node_id: Some(channel.context.get_counterparty_node_id()),
                                        channel_capacity_sats: Some(channel.context.get_value_satoshis()),
+                                       channel_funding_txo: channel.context.get_funding_txo(),
                                }, None));
                        } else {
                                log_error!(logger, "Missing ChannelMonitor for channel {} needed by ChannelManager.", &channel.context.channel_id());
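
With `ChannelManager` now populating the TXO on every closure path above, a downstream event handler can use it to watch for the closing transaction on chain. A sketch of such a consumer (the surrounding handler wiring is hypothetical; the event shape is from this diff):

    use lightning::chain::transaction::OutPoint;
    use lightning::events::Event;

    // Returns the funding TXO whose spend is the channel's closing transaction,
    // if known. `None` covers events serialized by LDK 0.0.119 or earlier.
    fn closing_txo_to_watch(event: &Event) -> Option<OutPoint> {
        match event {
            Event::ChannelClosed { channel_funding_txo, .. } => channel_funding_txo.clone(),
            _ => None,
        }
    }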
index 2adfe2f9379c04c6352a709ff2471232f87d6141..d153a0a4c2d7b964f6c4d4f6656e7959a7de6c88 100644 (file)
@@ -1539,6 +1539,8 @@ pub struct ExpectedCloseEvent {
        pub counterparty_node_id: Option<PublicKey>,
        pub discard_funding: bool,
        pub reason: Option<ClosureReason>,
+       pub channel_funding_txo: Option<OutPoint>,
+       pub user_channel_id: Option<u128>,
 }
 
 impl ExpectedCloseEvent {
@@ -1549,6 +1551,8 @@ impl ExpectedCloseEvent {
                        counterparty_node_id: None,
                        discard_funding,
                        reason: Some(reason),
+                       channel_funding_txo: None,
+                       user_channel_id: None,
                }
        }
 }
@@ -1567,12 +1571,20 @@ pub fn check_closed_events(node: &Node, expected_close_events: &[ExpectedCloseEv
                                reason,
                                counterparty_node_id,
                                channel_capacity_sats,
+                               channel_funding_txo,
+                               user_channel_id,
                                ..
                        } if (
                                expected_event.channel_id.map(|expected| *channel_id == expected).unwrap_or(true) &&
                                expected_event.reason.as_ref().map(|expected| reason == expected).unwrap_or(true) &&
-                               expected_event.counterparty_node_id.map(|expected| *counterparty_node_id == Some(expected)).unwrap_or(true) &&
-                               expected_event.channel_capacity_sats.map(|expected| *channel_capacity_sats == Some(expected)).unwrap_or(true)
+                               expected_event.
+                                       counterparty_node_id.map(|expected| *counterparty_node_id == Some(expected)).unwrap_or(true) &&
+                               expected_event.channel_capacity_sats
+                                       .map(|expected| *channel_capacity_sats == Some(expected)).unwrap_or(true) &&
+                               expected_event.channel_funding_txo
+                                       .map(|expected| *channel_funding_txo == Some(expected)).unwrap_or(true) &&
+                               expected_event.user_channel_id
+                                       .map(|expected| *user_channel_id == expected).unwrap_or(true)
                        )
                )));
        }
@@ -1597,6 +1609,8 @@ pub fn check_closed_event(node: &Node, events_count: usize, expected_reason: Clo
                counterparty_node_id: Some(*node_id),
                discard_funding: is_check_discard_funding,
                reason: Some(expected_reason.clone()),
+               channel_funding_txo: None,
+               user_channel_id: None,
        }).collect::<Vec<_>>();
        check_closed_events(node, expected_close_events.as_slice());
 }
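
Each `Option` field in `ExpectedCloseEvent` acts as a wildcard: `None` matches any actual value, while `Some(x)` must match exactly. The per-field predicate used above reduces to this shape (standalone illustration):

    // `None` expectation matches anything; `Some(e)` must equal the actual value.
    fn field_matches<T: PartialEq>(expected: Option<T>, actual: Option<T>) -> bool {
        expected.map(|e| actual == Some(e)).unwrap_or(true)
    }

    fn main() {
        assert!(field_matches(None::<u64>, Some(42)));  // unspecified => matches
        assert!(field_matches(Some(42u64), Some(42)));  // specified and equal
        assert!(!field_matches(Some(42u64), None));     // specified but absent
    }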
index 6f79b59774e6a728d43905fa44378b97d0fbda14..5711f238aa0c519a8ccf58aae91bb80694dcf70f 100644 (file)
@@ -10689,17 +10689,23 @@ fn test_disconnect_in_funding_batch() {
        nodes[0].node.peer_disconnected(&nodes[2].node.get_our_node_id());
 
        // The channels in the batch will close immediately.
-       let channel_id_1 = OutPoint { txid: tx.txid(), index: 0 }.to_channel_id();
-       let channel_id_2 = OutPoint { txid: tx.txid(), index: 1 }.to_channel_id();
+       let funding_txo_1 = OutPoint { txid: tx.txid(), index: 0 };
+       let funding_txo_2 = OutPoint { txid: tx.txid(), index: 1 };
+       let channel_id_1 = funding_txo_1.to_channel_id();
+       let channel_id_2 = funding_txo_2.to_channel_id();
        check_closed_events(&nodes[0], &[
                ExpectedCloseEvent {
                        channel_id: Some(channel_id_1),
                        discard_funding: true,
+                       channel_funding_txo: Some(funding_txo_1),
+                       user_channel_id: Some(42),
                        ..Default::default()
                },
                ExpectedCloseEvent {
                        channel_id: Some(channel_id_2),
                        discard_funding: true,
+                       channel_funding_txo: Some(funding_txo_2),
+                       user_channel_id: Some(43),
                        ..Default::default()
                },
        ]);
@@ -10757,8 +10763,10 @@ fn test_batch_funding_close_after_funding_signed() {
        assert_eq!(nodes[0].tx_broadcaster.txn_broadcast().len(), 0);
 
        // Force-close the channel for which we've completed the initial monitor.
-       let channel_id_1 = OutPoint { txid: tx.txid(), index: 0 }.to_channel_id();
-       let channel_id_2 = OutPoint { txid: tx.txid(), index: 1 }.to_channel_id();
+       let funding_txo_1 = OutPoint { txid: tx.txid(), index: 0 };
+       let funding_txo_2 = OutPoint { txid: tx.txid(), index: 1 };
+       let channel_id_1 = funding_txo_1.to_channel_id();
+       let channel_id_2 = funding_txo_2.to_channel_id();
        nodes[0].node.force_close_broadcasting_latest_txn(&channel_id_1, &nodes[1].node.get_our_node_id()).unwrap();
        check_added_monitors(&nodes[0], 2);
        {
@@ -10790,11 +10798,15 @@ fn test_batch_funding_close_after_funding_signed() {
                ExpectedCloseEvent {
                        channel_id: Some(channel_id_1),
                        discard_funding: true,
+                       channel_funding_txo: Some(funding_txo_1),
+                       user_channel_id: Some(42),
                        ..Default::default()
                },
                ExpectedCloseEvent {
                        channel_id: Some(channel_id_2),
                        discard_funding: true,
+                       channel_funding_txo: Some(funding_txo_2),
+                       user_channel_id: Some(43),
                        ..Default::default()
                },
        ]);
index 003e95645093a14af850d3bfcceb022c01337afe..6d46558b188b60c2433f51f002121bad268df60c 100644 (file)
@@ -1610,7 +1610,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                }
 
                if let wire::Message::GossipTimestampFilter(_msg) = message {
-                       // When supporting gossip messages, start inital gossip sync only after we receive
+                       // When supporting gossip messages, start initial gossip sync only after we receive
                        // a GossipTimestampFilter
                        if peer_lock.their_features.as_ref().unwrap().supports_gossip_queries() &&
                                !peer_lock.sent_gossip_timestamp_filter {
@@ -2217,7 +2217,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                                                                                        log_pubkey!(node_id));
                                                                        }
                                                                        // We do not have the peers write lock, so we just store that we're
-                                                                       // about to disconenct the peer and do it after we finish
+                                                                       // about to disconnect the peer and do it after we finish
                                                                        // processing most messages.
                                                                        let msg = msg.map(|msg| wire::Message::<<<CMH as core::ops::Deref>::Target as wire::CustomMessageReader>::CustomMessage>::Error(msg));
                                                                        peers_to_disconnect.insert(node_id, msg);
@@ -2226,7 +2226,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                                                                        log_trace!(logger, "Handling DisconnectPeer HandleError event in peer_handler for node {} with message {}",
                                                                                log_pubkey!(node_id), msg.data);
                                                                        // We do not have the peers write lock, so we just store that we're
-                                                                       // about to disconenct the peer and do it after we finish
+                                                                       // about to disconnect the peer and do it after we finish
                                                                        // processing most messages.
                                                                        peers_to_disconnect.insert(node_id, Some(wire::Message::Warning(msg)));
                                                                },
index 277661862e3361d72a3b3b44975a928422a87db9..485fd239128df8ab39b0806723a7d3d6b6e24e3a 100644 (file)
@@ -2706,7 +2706,7 @@ where L::Target: Logger {
                                                }
                                        }
 
-                                       // Means we succesfully traversed from the payer to the payee, now
+                                       // Means we successfully traversed from the payer to the payee, now
                                        // save this path for the payment route. Also, update the liquidity
                                        // remaining on the used hops, so that we take them into account
                                        // while looking for more paths.
@@ -7803,7 +7803,7 @@ mod tests {
        fn do_min_htlc_overpay_violates_max_htlc(blinded_payee: bool) {
                // Test that if overpaying to meet a later hop's min_htlc and causes us to violate an earlier
                // hop's max_htlc, we don't consider that candidate hop valid. Previously we would add this hop
-               // to `targets` and build an invalid path with it, and subsquently hit a debug panic asserting
+               // to `targets` and build an invalid path with it, and subsequently hit a debug panic asserting
                // that the used liquidity for a hop was less than its available liquidity limit.
                let secp_ctx = Secp256k1::new();
                let logger = Arc::new(ln_test_utils::TestLogger::new());
@@ -8452,7 +8452,7 @@ pub(crate) mod bench_utils {
                                                        }
                                                        break;
                                                }
-                                               // If we couldn't find a path with a higer amount, reduce and try again.
+                                               // If we couldn't find a path with a higher amount, reduce and try again.
                                                score_amt /= 100;
                                        }
 
index 37c036da9594747ea81b142e151ff960c2892dad..14e6bbe64a24524367661da70772f603538c3b55 100644 (file)
@@ -491,7 +491,7 @@ mod tests {
        }
 
        // Rather annoyingly, there's no safe way in Rust std to construct a Waker despite it being
-       // totally possible to construct from a trait implementation (though somewhat less effecient
+       // totally possible to construct from a trait implementation (though somewhat less efficient
        // compared to a raw VTable). Instead, we have to write out a lot of boilerplate to build a
        // waker, which we do here with a trivial Arc<AtomicBool> data element to track woke-ness.
        const WAKER_V_TABLE: RawWakerVTable = RawWakerVTable::new(waker_clone, wake, wake_by_ref, drop);
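
For reference, the boilerplate that comment alludes to looks roughly like the following: a `RawWakerVTable` over an `Arc<AtomicBool>` whose wake functions set the flag. This is a sketch of the general technique, not the exact in-tree helper:

    use std::sync::Arc;
    use std::sync::atomic::{AtomicBool, Ordering};
    use std::task::{RawWaker, RawWakerVTable, Waker};

    const VTABLE: RawWakerVTable =
        RawWakerVTable::new(clone_raw, wake_raw, wake_by_ref_raw, drop_raw);

    unsafe fn clone_raw(data: *const ()) -> RawWaker {
        // Bump the refcount without dropping the original reference.
        let arc = Arc::from_raw(data as *const AtomicBool);
        let cloned = Arc::clone(&arc);
        std::mem::forget(arc);
        RawWaker::new(Arc::into_raw(cloned) as *const (), &VTABLE)
    }
    unsafe fn wake_raw(data: *const ()) {
        // `wake` consumes the waker, so this reference is dropped afterwards.
        let arc = Arc::from_raw(data as *const AtomicBool);
        arc.store(true, Ordering::Release);
    }
    unsafe fn wake_by_ref_raw(data: *const ()) {
        let arc = Arc::from_raw(data as *const AtomicBool);
        arc.store(true, Ordering::Release);
        std::mem::forget(arc); // borrowed, not consumed
    }
    unsafe fn drop_raw(data: *const ()) {
        drop(Arc::from_raw(data as *const AtomicBool));
    }

    fn woken_flag_waker(flag: &Arc<AtomicBool>) -> Waker {
        let raw = RawWaker::new(Arc::into_raw(Arc::clone(flag)) as *const (), &VTABLE);
        unsafe { Waker::from_raw(raw) }
    }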