use crate::ln::chan_utils::make_funding_redeemscript_from_slices;
use crate::ln::msgs::{self, LightningError, ErrorAction};
+use crate::routing::gossip::{NetworkGraph, NodeId, P2PGossipSync};
+use crate::util::events::MessageSendEvent;
+use crate::util::logger::{Level, Logger};
use crate::util::ser::Writeable;
use crate::prelude::*;
+use alloc::sync::{Arc, Weak};
+use crate::sync::Mutex;
use core::ops::Deref;
/// An error when accessing the chain via [`UtxoLookup`].
#[derive(Clone, Debug)]
pub enum UtxoLookupError {
/// The requested chain is unknown.
UnknownChain,
/// The requested transaction doesn't exist or hasn't confirmed.
UnknownTx,
}
+/// The result of a [`UtxoLookup::get_utxo`] call. A call may resolve either synchronously,
+/// returning the `Sync` variant, or asynchronously, returning an [`UtxoFuture`] in the `Async`
+/// variant.
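+///
+/// For example, a [`UtxoLookup`] which already has the funding output on hand might answer
+/// synchronously (a sketch; the `lookup_scid` helper is illustrative, not part of this API):
+///
+/// ```ignore
+/// fn get_utxo(&self, _genesis_hash: &BlockHash, short_channel_id: u64) -> UtxoResult {
+///     match self.lookup_scid(short_channel_id) {
+///         Some(tx_out) => UtxoResult::Sync(Ok(tx_out)),
+///         None => UtxoResult::Sync(Err(UtxoLookupError::UnknownTx)),
+///     }
+/// }
+/// ```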
+pub enum UtxoResult {
+ /// A result which was resolved synchronously. It either includes a [`TxOut`] for the output
+ /// requested or a [`UtxoLookupError`].
+ Sync(Result<TxOut, UtxoLookupError>),
+ /// A result which will be resolved asynchronously. It includes a [`UtxoFuture`], a `clone` of
+ /// which you must keep locally and call [`UtxoFuture::resolve`] on once the lookup completes.
+ ///
+ /// Note that in order to avoid runaway memory usage, the number of parallel checks is limited,
+ /// but only fairly loosely. Because pending checks block all message processing, leaving
+ /// checks pending for an extended time may cause DoS of other functions. It is recommended you
+ /// keep a tight timeout on lookups, on the order of a few seconds.
+ Async(UtxoFuture),
+}
+
/// The `UtxoLookup` trait defines behavior for accessing on-chain UTXOs.
pub trait UtxoLookup {
/// Returns the transaction output of a funding transaction encoded by [`short_channel_id`].
/// Returns an error if `genesis_hash` is for a different chain or if such a transaction output
/// is unknown.
///
/// [`short_channel_id`]: https://github.com/lightning/bolts/blob/master/07-routing-gossip.md#definition-of-short_channel_id
- fn get_utxo(&self, genesis_hash: &BlockHash, short_channel_id: u64) -> Result<TxOut, UtxoLookupError>;
-}
-
-pub(crate) fn check_channel_announcement<U: Deref>(
- utxo_lookup: &Option<U>, msg: &msgs::UnsignedChannelAnnouncement
-) -> Result<Option<u64>, msgs::LightningError> where U::Target: UtxoLookup {
- match utxo_lookup {
- &None => {
- // Tentatively accept, potentially exposing us to DoS attacks
- Ok(None)
- },
- &Some(ref utxo_lookup) => {
- match utxo_lookup.get_utxo(&msg.chain_hash, msg.short_channel_id) {
+ fn get_utxo(&self, genesis_hash: &BlockHash, short_channel_id: u64) -> UtxoResult;
+}
+
+enum ChannelAnnouncement {
+ Full(msgs::ChannelAnnouncement),
+ Unsigned(msgs::UnsignedChannelAnnouncement),
+}
+impl ChannelAnnouncement {
+ fn node_id_1(&self) -> &NodeId {
+ match self {
+ ChannelAnnouncement::Full(msg) => &msg.contents.node_id_1,
+ ChannelAnnouncement::Unsigned(msg) => &msg.node_id_1,
+ }
+ }
+}
+
+enum NodeAnnouncement {
+ Full(msgs::NodeAnnouncement),
+ Unsigned(msgs::UnsignedNodeAnnouncement),
+}
+impl NodeAnnouncement {
+ fn timestamp(&self) -> u32 {
+ match self {
+ NodeAnnouncement::Full(msg) => msg.contents.timestamp,
+ NodeAnnouncement::Unsigned(msg) => msg.timestamp,
+ }
+ }
+}
+
+enum ChannelUpdate {
+ Full(msgs::ChannelUpdate),
+ Unsigned(msgs::UnsignedChannelUpdate),
+}
+impl ChannelUpdate {
+ fn timestamp(&self) -> u32 {
+ match self {
+ ChannelUpdate::Full(msg) => msg.contents.timestamp,
+ ChannelUpdate::Unsigned(msg) => msg.timestamp,
+ }
+ }
+}
+
+struct UtxoMessages {
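+ // `complete` is set if the lookup finishes before `check_channel_announcement` gets a chance
+ // to store the pending announcement below; the remaining fields buffer the announcement and
+ // any related gossip received while the lookup is in flight so it can be replayed afterwards.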
+ complete: Option<Result<TxOut, UtxoLookupError>>,
+ channel_announce: Option<ChannelAnnouncement>,
+ latest_node_announce_a: Option<NodeAnnouncement>,
+ latest_node_announce_b: Option<NodeAnnouncement>,
+ latest_channel_update_a: Option<ChannelUpdate>,
+ latest_channel_update_b: Option<ChannelUpdate>,
+}
+
+/// Represents the pending resolution of a [`UtxoLookup::get_utxo`] query which will complete asynchronously.
+///
+/// See [`UtxoResult::Async`] and [`UtxoFuture::resolve`] for more info.
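+///
+/// For example, an async [`UtxoLookup`] might hand LDK a future and resolve it from a background
+/// task once the lookup completes (a sketch; `spawn`, `lookup_utxo` and the `network_graph`
+/// field are illustrative, not part of this API):
+///
+/// ```ignore
+/// fn get_utxo(&self, _genesis_hash: &BlockHash, short_channel_id: u64) -> UtxoResult {
+///     let future = UtxoFuture::new();
+///     let resolver = future.clone();
+///     let graph = Arc::clone(&self.network_graph);
+///     spawn(async move {
+///         let res = lookup_utxo(short_channel_id).await;
+///         resolver.resolve_without_forwarding(&graph, res);
+///     });
+///     UtxoResult::Async(future)
+/// }
+/// ```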
+#[derive(Clone)]
+pub struct UtxoFuture {
+ state: Arc<Mutex<UtxoMessages>>,
+}
+
+/// A trivial implementation of [`UtxoLookup`] which is used to call back into the network graph
+/// once we have a concrete resolution of a request.
+struct UtxoResolver(Result<TxOut, UtxoLookupError>);
+impl UtxoLookup for UtxoResolver {
+ fn get_utxo(&self, _genesis_hash: &BlockHash, _short_channel_id: u64) -> UtxoResult {
+ UtxoResult::Sync(self.0.clone())
+ }
+}
+
+impl UtxoFuture {
+ /// Builds a new future for later resolution.
+ pub fn new() -> Self {
+ Self { state: Arc::new(Mutex::new(UtxoMessages {
+ complete: None,
+ channel_announce: None,
+ latest_node_announce_a: None,
+ latest_node_announce_b: None,
+ latest_channel_update_a: None,
+ latest_channel_update_b: None,
+ }))}
+ }
+
+ /// Resolves this future against the given `graph` and with the given `result`.
+ ///
+ /// This is identical to calling [`UtxoFuture::resolve`] with a dummy `gossip`, disabling
+ /// forwarding of the validated gossip message to peers.
+ ///
+ /// Because this may cause the [`NetworkGraph`]'s [`processing_queue_high`] to flip, in order
+ /// to allow us to interact with peers again, you should call [`PeerManager::process_events`]
+ /// after this.
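+ ///
+ /// For example (a sketch; `network_graph`, `peer_manager` and `tx_out` are illustrative):
+ ///
+ /// ```ignore
+ /// future.resolve_without_forwarding(&network_graph, Ok(tx_out));
+ /// peer_manager.process_events();
+ /// ```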
+ ///
+ /// [`processing_queue_high`]: crate::ln::msgs::RoutingMessageHandler::processing_queue_high
+ /// [`PeerManager::process_events`]: crate::ln::peer_handler::PeerManager::process_events
+ pub fn resolve_without_forwarding<L: Deref>(&self,
+ graph: &NetworkGraph<L>, result: Result<TxOut, UtxoLookupError>)
+ where L::Target: Logger {
+ self.do_resolve(graph, result);
+ }
+
+ /// Resolves this future against the given `graph` and with the given `result`.
+ ///
+ /// The given `gossip` is used to broadcast any validated messages onwards to all peers which
+ /// have available buffer space.
+ ///
+ /// Because this may cause the [`NetworkGraph`]'s [`processing_queue_high`] to flip, in order
+ /// to allow us to interact with peers again, you should call [`PeerManager::process_events`]
+ /// after this.
+ ///
+ /// [`processing_queue_high`]: crate::ln::msgs::RoutingMessageHandler::processing_queue_high
+ /// [`PeerManager::process_events`]: crate::ln::peer_handler::PeerManager::process_events
+ pub fn resolve<L: Deref, G: Deref<Target=NetworkGraph<L>>, U: Deref, GS: Deref<Target = P2PGossipSync<G, U, L>>>(&self,
+ graph: &NetworkGraph<L>, gossip: GS, result: Result<TxOut, UtxoLookupError>
+ ) where L::Target: Logger, U::Target: UtxoLookup {
+ let mut res = self.do_resolve(graph, result);
+ for msg_opt in res.iter_mut() {
+ if let Some(msg) = msg_opt.take() {
+ gossip.forward_gossip_msg(msg);
+ }
+ }
+ }
+
+ fn do_resolve<L: Deref>(&self, graph: &NetworkGraph<L>, result: Result<TxOut, UtxoLookupError>)
+ -> [Option<MessageSendEvent>; 5] where L::Target: Logger {
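+ // Up to five messages may become broadcastable once the lookup completes: the
+ // channel_announcement itself, the latest node_announcement for each endpoint, and the
+ // latest channel_update in each direction.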
+ let (announcement, node_a, node_b, update_a, update_b) = {
+ let mut pending_checks = graph.pending_checks.internal.lock().unwrap();
+ let mut async_messages = self.state.lock().unwrap();
+
+ if async_messages.channel_announce.is_none() {
+ // We raced returning to `check_channel_announcement` which hasn't updated
+ // `channel_announce` yet. That's okay, we can set the `complete` field which it will
+ // check once it gets control again.
+ async_messages.complete = Some(result);
+ return [None, None, None, None, None];
+ }
+
+ let announcement_msg = match async_messages.channel_announce.as_ref().unwrap() {
+ ChannelAnnouncement::Full(signed_msg) => &signed_msg.contents,
+ ChannelAnnouncement::Unsigned(msg) => &msg,
+ };
+
+ pending_checks.lookup_completed(announcement_msg, &Arc::downgrade(&self.state));
+
+ (async_messages.channel_announce.take().unwrap(),
+ async_messages.latest_node_announce_a.take(),
+ async_messages.latest_node_announce_b.take(),
+ async_messages.latest_channel_update_a.take(),
+ async_messages.latest_channel_update_b.take())
+ };
+
+ let mut res = [None, None, None, None, None];
+ let mut res_idx = 0;
+
+ // Now that we've updated our internal state, pass the pending messages back through the
+ // network graph with a different `UtxoLookup` which will resolve immediately.
+ // Note that we ignore errors as we don't disconnect peers anyway, so there's nothing to do
+ // with them.
+ let resolver = UtxoResolver(result);
+ match announcement {
+ ChannelAnnouncement::Full(signed_msg) => {
+ if graph.update_channel_from_announcement(&signed_msg, &Some(&resolver)).is_ok() {
+ res[res_idx] = Some(MessageSendEvent::BroadcastChannelAnnouncement {
+ msg: signed_msg, update_msg: None,
+ });
+ res_idx += 1;
+ }
+ },
+ ChannelAnnouncement::Unsigned(msg) => {
+ let _ = graph.update_channel_from_unsigned_announcement(&msg, &Some(&resolver));
+ },
+ }
+
+ for announce in core::iter::once(node_a).chain(core::iter::once(node_b)) {
+ match announce {
+ Some(NodeAnnouncement::Full(signed_msg)) => {
+ if graph.update_node_from_announcement(&signed_msg).is_ok() {
+ res[res_idx] = Some(MessageSendEvent::BroadcastNodeAnnouncement {
+ msg: signed_msg,
+ });
+ res_idx += 1;
+ }
+ },
+ Some(NodeAnnouncement::Unsigned(msg)) => {
+ let _ = graph.update_node_from_unsigned_announcement(&msg);
+ },
+ None => {},
+ }
+ }
+
+ for update in core::iter::once(update_a).chain(core::iter::once(update_b)) {
+ match update {
+ Some(ChannelUpdate::Full(signed_msg)) => {
+ if graph.update_channel(&signed_msg).is_ok() {
+ res[res_idx] = Some(MessageSendEvent::BroadcastChannelUpdate {
+ msg: signed_msg,
+ });
+ res_idx += 1;
+ }
+ },
+ Some(ChannelUpdate::Unsigned(msg)) => {
+ let _ = graph.update_channel_unsigned(&msg);
+ },
+ None => {},
+ }
+ }
+
+ res
+ }
+}
+
+struct PendingChecksContext {
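+ // Lookups currently in flight, keyed by SCID and by each node_id in the pending announcement.
+ // Only `Weak` references are held so a dropped `UtxoFuture` doesn't keep its entry (or the
+ // gossip buffered in it) alive forever.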
+ channels: HashMap<u64, Weak<Mutex<UtxoMessages>>>,
+ nodes: HashMap<NodeId, Vec<Weak<Mutex<UtxoMessages>>>>,
+}
+
+impl PendingChecksContext {
+ fn lookup_completed(&mut self,
+ msg: &msgs::UnsignedChannelAnnouncement, completed_state: &Weak<Mutex<UtxoMessages>>
+ ) {
+ if let hash_map::Entry::Occupied(e) = self.channels.entry(msg.short_channel_id) {
+ if Weak::ptr_eq(e.get(), &completed_state) {
+ e.remove();
+ }
+ }
+
+ if let hash_map::Entry::Occupied(mut e) = self.nodes.entry(msg.node_id_1) {
+ e.get_mut().retain(|elem| !Weak::ptr_eq(&elem, &completed_state));
+ if e.get().is_empty() { e.remove(); }
+ }
+ if let hash_map::Entry::Occupied(mut e) = self.nodes.entry(msg.node_id_2) {
+ e.get_mut().retain(|elem| !Weak::ptr_eq(&elem, &completed_state));
+ if e.get().is_empty() { e.remove(); }
+ }
+ }
+}
+
+/// The set of in-flight UTXO lookups, plus the gossip messages which are held pending their completion.
+pub(super) struct PendingChecks {
+ internal: Mutex<PendingChecksContext>,
+}
+
+impl PendingChecks {
+ pub(super) fn new() -> Self {
+ PendingChecks { internal: Mutex::new(PendingChecksContext {
+ channels: HashMap::new(), nodes: HashMap::new(),
+ }) }
+ }
+
+ /// Checks if there is a pending `channel_announcement` UTXO validation for the given channel
+ /// and, if so, stores the `channel_update` for handling once the validation completes,
+ /// returning an `Err`.
+ pub(super) fn check_hold_pending_channel_update(
+ &self, msg: &msgs::UnsignedChannelUpdate, full_msg: Option<&msgs::ChannelUpdate>
+ ) -> Result<(), LightningError> {
+ let mut pending_checks = self.internal.lock().unwrap();
+ if let hash_map::Entry::Occupied(e) = pending_checks.channels.entry(msg.short_channel_id) {
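+ // The direction bit of `flags` indicates which end of the channel this update came from.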
+ let is_from_a = (msg.flags & 1) == 1;
+ match Weak::upgrade(e.get()) {
+ Some(msgs_ref) => {
+ let mut messages = msgs_ref.lock().unwrap();
+ let latest_update = if is_from_a {
+ &mut messages.latest_channel_update_a
+ } else {
+ &mut messages.latest_channel_update_b
+ };
+ if latest_update.is_none() || latest_update.as_ref().unwrap().timestamp() < msg.timestamp {
+ // If the message we got has a higher timestamp, just blindly assume the
+ // signatures on the new message are correct and drop the old message. This
+ // may cause us to end up dropping valid `channel_update`s if a peer is
+ // malicious, but we should get the correct ones when the node updates them.
+ *latest_update = Some(
+ if let Some(msg) = full_msg { ChannelUpdate::Full(msg.clone()) }
+ else { ChannelUpdate::Unsigned(msg.clone()) });
+ }
+ return Err(LightningError {
+ err: "Awaiting channel_announcement validation to accept channel_update".to_owned(),
+ action: ErrorAction::IgnoreAndLog(Level::Gossip),
+ });
+ },
+ None => { e.remove(); },
+ }
+ }
+ Ok(())
+ }
+
+ /// Checks if there is a pending `channel_announcement` UTXO validation for a channel with the
+ /// given node and, if so, stores the `node_announcement` for handling once the validation
+ /// completes, returning an `Err`.
+ pub(super) fn check_hold_pending_node_announcement(
+ &self, msg: &msgs::UnsignedNodeAnnouncement, full_msg: Option<&msgs::NodeAnnouncement>
+ ) -> Result<(), LightningError> {
+ let mut pending_checks = self.internal.lock().unwrap();
+ if let hash_map::Entry::Occupied(mut e) = pending_checks.nodes.entry(msg.node_id) {
+ let mut found_at_least_one_chan = false;
+ e.get_mut().retain(|node_msgs| {
+ match Weak::upgrade(&node_msgs) {
+ Some(chan_mtx) => {
+ let mut chan_msgs = chan_mtx.lock().unwrap();
+ if let Some(chan_announce) = &chan_msgs.channel_announce {
+ let latest_announce =
+ if *chan_announce.node_id_1() == msg.node_id {
+ &mut chan_msgs.latest_node_announce_a
+ } else {
+ &mut chan_msgs.latest_node_announce_b
+ };
+ if latest_announce.is_none() ||
+ latest_announce.as_ref().unwrap().timestamp() < msg.timestamp
+ {
+ // If the message we got has a higher timestamp, just blindly
+ // assume the signatures on the new message are correct and drop
+ // the old message. This may cause us to end up dropping valid
+ // `node_announcement`s if a peer is malicious, but we should get
+ // the correct ones when the node updates them.
+ *latest_announce = Some(
+ if let Some(msg) = full_msg { NodeAnnouncement::Full(msg.clone()) }
+ else { NodeAnnouncement::Unsigned(msg.clone()) });
+ }
+ found_at_least_one_chan = true;
+ true
+ } else {
+ debug_assert!(false, "channel_announce is set before struct is added to node map");
+ false
+ }
+ },
+ None => false,
+ }
+ });
+ if e.get().is_empty() { e.remove(); }
+ if found_at_least_one_chan {
+ return Err(LightningError {
+ err: "Awaiting channel_announcement validation to accept node_announcement".to_owned(),
+ action: ErrorAction::IgnoreAndLog(Level::Gossip),
+ });
+ }
+ }
+ Ok(())
+ }
+
+ fn check_replace_previous_entry(msg: &msgs::UnsignedChannelAnnouncement,
+ full_msg: Option<&msgs::ChannelAnnouncement>, replacement: Option<Weak<Mutex<UtxoMessages>>>,
+ pending_channels: &mut HashMap<u64, Weak<Mutex<UtxoMessages>>>
+ ) -> Result<(), msgs::LightningError> {
+ match pending_channels.entry(msg.short_channel_id) {
+ hash_map::Entry::Occupied(mut e) => {
+ // There's already a pending lookup for the given SCID. Check if the messages
+ // are the same and, if so, return immediately (don't bother spawning another
+ // lookup if we haven't gotten that far yet).
+ match Weak::upgrade(&e.get()) {
+ Some(pending_msgs) => {
+ let pending_matches = match &pending_msgs.lock().unwrap().channel_announce {
+ Some(ChannelAnnouncement::Full(pending_msg)) => Some(pending_msg) == full_msg,
+ Some(ChannelAnnouncement::Unsigned(pending_msg)) => pending_msg == msg,
+ None => {
+ // This shouldn't actually be reachable. We set the
+ // `channel_announce` field under the same lock as setting the
+ // channel map entry. Still, we can just treat it as
+ // non-matching and let the new request fly.
+ debug_assert!(false);
+ false
+ },
+ };
+ if pending_matches {
+ return Err(LightningError {
+ err: "Channel announcement is already being checked".to_owned(),
+ action: ErrorAction::IgnoreDuplicateGossip,
+ });
+ } else {
+ // The earlier lookup is for a different message. If we have another
+ // request in-flight now, replace the original.
+ // Note that in the replace case whether to replace is somewhat
+ // arbitrary - both results will be handled, we're just updating the
+ // value that will be compared to future lookups with the same SCID.
+ if let Some(item) = replacement {
+ *e.get_mut() = item;
+ }
+ }
+ },
+ None => {
+ // The earlier lookup already resolved. We can't be sure it's the same
+ // so just remove/replace it and move on.
+ if let Some(item) = replacement {
+ *e.get_mut() = item;
+ } else { e.remove(); }
+ },
+ }
+ },
+ hash_map::Entry::Vacant(v) => {
+ if let Some(item) = replacement { v.insert(item); }
+ },
+ }
+ Ok(())
+ }
+
+ pub(super) fn check_channel_announcement<U: Deref>(&self,
+ utxo_lookup: &Option<U>, msg: &msgs::UnsignedChannelAnnouncement,
+ full_msg: Option<&msgs::ChannelAnnouncement>
+ ) -> Result<Option<u64>, msgs::LightningError> where U::Target: UtxoLookup {
+ let handle_result = |res| {
+ match res {
Ok(TxOut { value, script_pubkey }) => {
let expected_script =
make_funding_redeemscript_from_slices(msg.bitcoin_key_1.as_slice(), msg.bitcoin_key_2.as_slice()).to_v0_p2wsh();
})
},
}
+ };
+
+ Self::check_replace_previous_entry(msg, full_msg, None,
+ &mut self.internal.lock().unwrap().channels)?;
+
+ match utxo_lookup {
+ &None => {
+ // Tentatively accept, potentially exposing us to DoS attacks
+ Ok(None)
+ },
+ &Some(ref utxo_lookup) => {
+ match utxo_lookup.get_utxo(&msg.chain_hash, msg.short_channel_id) {
+ UtxoResult::Sync(res) => handle_result(res),
+ UtxoResult::Async(future) => {
+ let mut pending_checks = self.internal.lock().unwrap();
+ let mut async_messages = future.state.lock().unwrap();
+ if let Some(res) = async_messages.complete.take() {
+ // In the unlikely event the future resolved before we managed to store the
+ // pending message here, handle the result in-line.
+ handle_result(res)
+ } else {
+ Self::check_replace_previous_entry(msg, full_msg,
+ Some(Arc::downgrade(&future.state)), &mut pending_checks.channels)?;
+ async_messages.channel_announce = Some(
+ if let Some(msg) = full_msg { ChannelAnnouncement::Full(msg.clone()) }
+ else { ChannelAnnouncement::Unsigned(msg.clone()) });
+ pending_checks.nodes.entry(msg.node_id_1)
+ .or_insert(Vec::new()).push(Arc::downgrade(&future.state));
+ pending_checks.nodes.entry(msg.node_id_2)
+ .or_insert(Vec::new()).push(Arc::downgrade(&future.state));
+ Err(LightningError {
+ err: "Channel being checked async".to_owned(),
+ action: ErrorAction::IgnoreAndLog(Level::Gossip),
+ })
+ }
+ },
+ }
+ }
+ }
+ }
+
+ /// The maximum number of pending gossip checks before [`Self::too_many_checks_pending`]
+ /// returns `true`. Note that this isn't a strict upper-bound on the number of checks pending -
+ /// each peer may, at a minimum, read one more socket buffer worth of `channel_announcement`s
+ /// which we'll have to process. With a socket buffer of 4KB and a minimum
+ /// `channel_announcement` size of, roughly, 429 bytes, this may leave us with `10*our peer
+ /// count` messages to process beyond this limit. Because we'll probably have a few peers,
+ /// there's no reason for this constant to be materially less than 30 or so, and 32 in-flight
+ /// checks should be more than enough for decent parallelism.
+ const MAX_PENDING_LOOKUPS: usize = 32;
+
+ /// Returns true if there are a large number of async checks pending and future
+ /// `channel_announcement` messages should be delayed. Note that this is only a hint and
+ /// messages already in-flight may still have to be handled for various reasons.
+ pub(super) fn too_many_checks_pending(&self) -> bool {
+ let mut pending_checks = self.internal.lock().unwrap();
+ if pending_checks.channels.len() > Self::MAX_PENDING_LOOKUPS {
+ // If we have many channel checks pending, ensure we don't have any dangling checks
+ // (i.e. checks where the user told us they'd call back but drop'd the `UtxoFuture`
+ // instead) before we commit to applying backpressure.
+ pending_checks.channels.retain(|_, chan| {
+ Weak::upgrade(&chan).is_some()
+ });
+ pending_checks.nodes.retain(|_, channels| {
+ channels.retain(|chan| Weak::upgrade(&chan).is_some());
+ !channels.is_empty()
+ });
+ pending_checks.channels.len() > Self::MAX_PENDING_LOOKUPS
+ } else {
+ false
}
}
}