where F::Target: FeeEstimator,
L::Target: Logger,
{
- if cached_request.outpoints().len() == 0 { return None } // But don't prune pending claiming request yet, we may have to resurrect HTLCs
+ let request_outpoints = cached_request.outpoints();
+ if request_outpoints.is_empty() {
+ // Don't prune pending claiming request yet, we may have to resurrect HTLCs. Untractable
+ // packages cannot be aggregated and will never be split, so we cannot end up with an
+ // empty claim.
+ debug_assert!(cached_request.is_malleable());
+ return None;
+ }
+ // If we've seen transaction inclusion in the chain for all outpoints in our request, we
+ // don't need to continue generating more claims. We'll keep tracking the request to fully
+ // remove it once it reaches the confirmation threshold, or to generate a new claim if the
+ // transaction is reorged out.
+ let mut all_inputs_have_confirmed_spend = true;
+ for outpoint in &request_outpoints {
+ if let Some(first_claim_txid_height) = self.claimable_outpoints.get(outpoint) {
+ // We check for outpoint spends within claims individually rather than as a set
+ // since requests can have outpoints split off.
+ if !self.onchain_events_awaiting_threshold_conf.iter()
+ .any(|event_entry| if let OnchainEvent::Claim { claim_request } = event_entry.event {
+ first_claim_txid_height.0 == claim_request
+ } else {
+ // The onchain event is not a claim, keep seeking until we find one.
+ false
+ })
+ {
+ // Either we had no `OnchainEvent::Claim`, or we did but none matched the
+ // outpoint's registered spend.
+ all_inputs_have_confirmed_spend = false;
+ }
+ } else {
+ // The request's outpoint spend does not exist yet.
+ all_inputs_have_confirmed_spend = false;
+ }
+ }
+ if all_inputs_have_confirmed_spend {
+ return None;
+ }
// Compute new height timer to decide when we need to regenerate a new bumped version of the claim tx (if we
// didn't receive confirmation of it before, or not enough reorg-safe depth on top of it).
mine_transaction(&nodes[1], &timeout_tx);
check_added_monitors!(nodes[1], 1);
check_closed_broadcast!(nodes[1], true);
- {
- // B will rebroadcast a fee-bumped timeout transaction here.
- let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
- assert_eq!(node_txn.len(), 1);
- check_spends!(node_txn[0], commitment_tx[0]);
- }
connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
- {
- // B may rebroadcast its own holder commitment transaction here, as a safeguard against
- // some incredibly unlikely partial-eclipse-attack scenarios. That said, because the
- // original commitment_tx[0] (also spending chan_2.3) has reached ANTI_REORG_DELAY B really
- // shouldn't broadcast anything here, and in some connect style scenarios we do not.
- let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
- if node_txn.len() == 1 {
- check_spends!(node_txn[0], chan_2.3);
- } else {
- assert_eq!(node_txn.len(), 0);
- }
- }
expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
check_added_monitors!(nodes[1], 1);
connect_block(&nodes[0], &Block { header: header_130, txdata: penalty_txn });
let header_131 = BlockHeader { version: 0x20000000, prev_blockhash: header_130.block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
connect_block(&nodes[0], &Block { header: header_131, txdata: Vec::new() });
- {
- let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
- assert_eq!(node_txn.len(), 2); // 2 bumped penalty txn on revoked commitment tx
-
- check_spends!(node_txn[0], revoked_local_txn[0]);
- check_spends!(node_txn[1], revoked_local_txn[0]);
- // Note that these are both bogus - they spend outputs already claimed in block 129:
- if node_txn[0].input[0].previous_output == revoked_htlc_txn[0].input[0].previous_output {
- assert_eq!(node_txn[1].input[0].previous_output, revoked_htlc_txn[2].input[0].previous_output);
- } else {
- assert_eq!(node_txn[0].input[0].previous_output, revoked_htlc_txn[2].input[0].previous_output);
- assert_eq!(node_txn[1].input[0].previous_output, revoked_htlc_txn[0].input[0].previous_output);
- }
-
- node_txn.clear();
- };
// Few more blocks to confirm penalty txn
connect_blocks(&nodes[0], 4);
//! Creating blinded routes and related utilities live here.
-use bitcoin::secp256k1::{self, PublicKey, Secp256k1, SecretKey};
+use bitcoin::hashes::{Hash, HashEngine};
+use bitcoin::hashes::sha256::Hash as Sha256;
+use bitcoin::secp256k1::{self, PublicKey, Scalar, Secp256k1, SecretKey};
-use crate::chain::keysinterface::KeysInterface;
+use crate::chain::keysinterface::{KeysInterface, Recipient};
+use super::packet::ControlTlvs;
use super::utils;
use crate::ln::msgs::DecodeError;
-use crate::util::chacha20poly1305rfc::ChaChaPolyWriteAdapter;
-use crate::util::ser::{Readable, VecWriter, Writeable, Writer};
+use crate::ln::onion_utils;
+use crate::util::chacha20poly1305rfc::{ChaChaPolyReadAdapter, ChaChaPolyWriteAdapter};
+use crate::util::ser::{FixedLengthReader, LengthReadableArgs, Readable, VecWriter, Writeable, Writer};
-use crate::io;
+use core::mem;
+use core::ops::Deref;
+use crate::io::{self, Cursor};
use crate::prelude::*;
/// Onion messages can be sent and received to blinded routes, which serve to hide the identity of
blinded_hops: blinded_hops(secp_ctx, node_pks, &blinding_secret).map_err(|_| ())?,
})
}
+
+	// Advance the blinded route by one hop, making the second hop the new introduction node.
+	// Errors if the first hop's encrypted payload cannot be decrypted with our node key or does
+	// not parse as forward TLVs (i.e. we are not really this route's introduction node).
+	pub(super) fn advance_by_one<K: Deref, T: secp256k1::Signing + secp256k1::Verification>
+	(&mut self, keys_manager: &K, secp_ctx: &Secp256k1<T>) -> Result<(), ()>
+	where K::Target: KeysInterface
+	{
+	// ECDH our node key with the current blinding point to derive the key (rho) that
+	// encrypts the first hop's control TLVs.
+	let control_tlvs_ss = keys_manager.ecdh(Recipient::Node, &self.blinding_point, None)?;
+	let rho = onion_utils::gen_rho_from_shared_secret(&control_tlvs_ss.secret_bytes());
+	// Pop the first hop off the route; it is consumed by this advance.
+	let encrypted_control_tlvs = self.blinded_hops.remove(0).encrypted_payload;
+	let mut s = Cursor::new(&encrypted_control_tlvs);
+	let mut reader = FixedLengthReader::new(&mut s, encrypted_control_tlvs.len() as u64);
+	match ChaChaPolyReadAdapter::read(&mut reader, rho) {
+	Ok(ChaChaPolyReadAdapter { readable: ControlTlvs::Forward(ForwardTlvs {
+	mut next_node_id, next_blinding_override,
+	})}) => {
+	let mut new_blinding_point = match next_blinding_override {
+	Some(blinding_point) => blinding_point,
+	None => {
+	// No explicit override: derive the next blinding point by tweaking the
+	// current one with SHA256(blinding_point || shared_secret).
+	let blinding_factor = {
+	let mut sha = Sha256::engine();
+	sha.input(&self.blinding_point.serialize()[..]);
+	sha.input(control_tlvs_ss.as_ref());
+	Sha256::from_engine(sha).into_inner()
+	};
+	self.blinding_point.mul_tweak(secp_ctx, &Scalar::from_be_bytes(blinding_factor).unwrap())
+	.map_err(|_| ())?
+	}
+	};
+	// Install the next hop's node id and blinding point as the route's new entry point.
+	mem::swap(&mut self.blinding_point, &mut new_blinding_point);
+	mem::swap(&mut self.introduction_node_id, &mut next_node_id);
+	Ok(())
+	},
+	// Decryption failed or the TLVs were receive (final-hop) TLVs -- cannot advance.
+	_ => Err(())
+	}
+	}
}
/// Construct blinded hops for the given `unblinded_path`.
assert_eq!(err, SendError::TooBigPacket);
}
+#[test]
+fn we_are_intro_node() {
+	// If we are sending straight to a blinded route and we are the introduction node, we need to
+	// advance the blinded route by 1 hop so the second hop is the new introduction node.
+	let mut nodes = create_nodes(3);
+	let test_msg = TestCustomMessage {};
+
+	let secp_ctx = Secp256k1::new();
+	// Three-hop blinded route whose introduction node is nodes[0], the sender itself.
+	let blinded_route = BlindedRoute::new(&[nodes[0].get_node_pk(), nodes[1].get_node_pk(), nodes[2].get_node_pk()], &*nodes[2].keys_manager, &secp_ctx).unwrap();
+
+	// No intermediate nodes: the messenger must advance the route past ourselves.
+	nodes[0].messenger.send_onion_message(&[], Destination::BlindedRoute(blinded_route), OnionMessageContents::Custom(test_msg.clone()), None).unwrap();
+	pass_along_path(&nodes, None);
+
+	// Try with a two-hop blinded route where we are the introduction node.
+	let blinded_route = BlindedRoute::new(&[nodes[0].get_node_pk(), nodes[1].get_node_pk()], &*nodes[1].keys_manager, &secp_ctx).unwrap();
+	nodes[0].messenger.send_onion_message(&[], Destination::BlindedRoute(blinded_route), OnionMessageContents::Custom(test_msg), None).unwrap();
+	// After advancing, the path ends at nodes[1]; drop nodes[2] so pass_along_path checks receipt there.
+	nodes.remove(2);
+	pass_along_path(&nodes, None);
+}
+
#[test]
fn invalid_blinded_route_error() {
// Make sure we error as expected if a provided blinded route has 0 or 1 hops.
InvalidMessage,
/// Our next-hop peer's buffer was full or our total outbound buffer was full.
BufferFull,
+ /// Failed to retrieve our node id from the provided [`KeysInterface`].
+ ///
+ /// [`KeysInterface`]: crate::chain::keysinterface::KeysInterface
+ GetNodeIdFailed,
+ /// We attempted to send to a blinded route where we are the introduction node, and failed to
+ /// advance the blinded route to make the second hop the new introduction node. Either
+ /// [`KeysInterface::ecdh`] failed, we failed to tweak the current blinding point to get the
+ /// new blinding point, or we were attempting to send to ourselves.
+ BlindedRouteAdvanceFailed,
}
/// Handler for custom onion messages. If you are using [`SimpleArcOnionMessenger`],
/// Send an onion message with contents `message` to `destination`, routing it through `intermediate_nodes`.
/// See [`OnionMessenger`] for example usage.
- pub fn send_onion_message<T: CustomOnionMessageContents>(&self, intermediate_nodes: &[PublicKey], destination: Destination, message: OnionMessageContents<T>, reply_path: Option<BlindedRoute>) -> Result<(), SendError> {
+ pub fn send_onion_message<T: CustomOnionMessageContents>(&self, intermediate_nodes: &[PublicKey], mut destination: Destination, message: OnionMessageContents<T>, reply_path: Option<BlindedRoute>) -> Result<(), SendError> {
if let Destination::BlindedRoute(BlindedRoute { ref blinded_hops, .. }) = destination {
if blinded_hops.len() < 2 {
return Err(SendError::TooFewBlindedHops);
let OnionMessageContents::Custom(ref msg) = message;
if msg.tlv_type() < 64 { return Err(SendError::InvalidMessage) }
+ // If we are sending straight to a blinded route and we are the introduction node, we need to
+ // advance the blinded route by 1 hop so the second hop is the new introduction node.
+ if intermediate_nodes.len() == 0 {
+ if let Destination::BlindedRoute(ref mut blinded_route) = destination {
+ let our_node_id = self.keys_manager.get_node_id(Recipient::Node)
+ .map_err(|()| SendError::GetNodeIdFailed)?;
+ if blinded_route.introduction_node_id == our_node_id {
+ blinded_route.advance_by_one(&self.keys_manager, &self.secp_ctx)
+ .map_err(|()| SendError::BlindedRouteAdvanceFailed)?;
+ }
+ }
+ }
+
let blinding_secret_bytes = self.keys_manager.get_secure_random_bytes();
let blinding_secret = SecretKey::from_slice(&blinding_secret_bytes[..]).expect("RNG is busted");
let (introduction_node_id, blinding_point) = if intermediate_nodes.len() != 0 {
next_blinding_override: Some(blinding_pt),
})), control_tlvs_ss));
}
- if let Some(encrypted_payload) = enc_payload_opt {
- payloads.push((Payload::Forward(ForwardControlTlvs::Blinded(encrypted_payload)),
- control_tlvs_ss));
- } else { debug_assert!(false); }
- blinded_path_idx += 1;
- } else if blinded_path_idx < num_blinded_hops - 1 && enc_payload_opt.is_some() {
+ }
+ if blinded_path_idx < num_blinded_hops.saturating_sub(1) && enc_payload_opt.is_some() {
payloads.push((Payload::Forward(ForwardControlTlvs::Blinded(enc_payload_opt.unwrap())),
control_tlvs_ss));
blinded_path_idx += 1;
if info.two_to_one.is_some() && info.two_to_one.as_ref().unwrap().last_update < min_time_unix {
info.two_to_one = None;
}
- if info.one_to_two.is_none() && info.two_to_one.is_none() {
+ if info.one_to_two.is_none() || info.two_to_one.is_none() {
// We check the announcement_received_time here to ensure we don't drop
// announcements that we just received and are just waiting for our peer to send a
// channel_update for.
for scid in scids_to_remove {
let info = channels.remove(&scid).expect("We just accessed this scid, it should be present");
Self::remove_channel_in_nodes(&mut nodes, &info, scid);
+ self.removed_channels.lock().unwrap().insert(scid, Some(current_time_unix));
}
}
assert!(network_graph.update_channel_from_announcement(&valid_channel_announcement, &chain_source).is_ok());
assert!(network_graph.read_only().channels().get(&short_channel_id).is_some());
+ // Submit two channel updates for each channel direction (update.flags bit).
let valid_channel_update = get_signed_channel_update(|_| {}, node_1_privkey, &secp_ctx);
assert!(gossip_sync.handle_channel_update(&valid_channel_update).is_ok());
assert!(network_graph.read_only().channels().get(&short_channel_id).unwrap().one_to_two.is_some());
+ let valid_channel_update_2 = get_signed_channel_update(|update| {update.flags |=1;}, node_2_privkey, &secp_ctx);
+ gossip_sync.handle_channel_update(&valid_channel_update_2).unwrap();
+ assert!(network_graph.read_only().channels().get(&short_channel_id).unwrap().two_to_one.is_some());
+
network_graph.remove_stale_channels_and_tracking_with_time(100 + STALE_CHANNEL_UPDATE_AGE_LIMIT_SECS);
assert_eq!(network_graph.read_only().channels().len(), 1);
assert_eq!(network_graph.read_only().nodes().len(), 2);
network_graph.remove_stale_channels_and_tracking_with_time(101 + STALE_CHANNEL_UPDATE_AGE_LIMIT_SECS);
+ #[cfg(not(feature = "std"))] {
+ // Make sure removed channels are tracked.
+ assert_eq!(network_graph.removed_channels.lock().unwrap().len(), 1);
+ }
+ network_graph.remove_stale_channels_and_tracking_with_time(101 + STALE_CHANNEL_UPDATE_AGE_LIMIT_SECS +
+ REMOVED_ENTRIES_TRACKING_AGE_LIMIT_SECS);
+
#[cfg(feature = "std")]
{
// In std mode, a further check is performed before fully removing the channel -
// the channel_announcement must have been received at least two weeks ago. We
- // fudge that here by indicating the time has jumped two weeks. Note that the
- // directional channel information will have been removed already..
+ // fudge that here by indicating the time has jumped two weeks.
assert_eq!(network_graph.read_only().channels().len(), 1);
assert_eq!(network_graph.read_only().nodes().len(), 2);
- assert!(network_graph.read_only().channels().get(&short_channel_id).unwrap().one_to_two.is_none());
+ // Note that the directional channel information will have been removed already.
+ // We want to check that this will work even if *one* of the channel updates is recent,
+ // so we should add it with a recent timestamp.
+ assert!(network_graph.read_only().channels().get(&short_channel_id).unwrap().one_to_two.is_none());
use std::time::{SystemTime, UNIX_EPOCH};
let announcement_time = SystemTime::now().duration_since(UNIX_EPOCH).expect("Time must be > 1970").as_secs();
+ let valid_channel_update = get_signed_channel_update(|unsigned_channel_update| {
+ unsigned_channel_update.timestamp = (announcement_time + 1 + STALE_CHANNEL_UPDATE_AGE_LIMIT_SECS) as u32;
+ }, node_1_privkey, &secp_ctx);
+ assert!(gossip_sync.handle_channel_update(&valid_channel_update).is_ok());
+ assert!(network_graph.read_only().channels().get(&short_channel_id).unwrap().one_to_two.is_some());
network_graph.remove_stale_channels_and_tracking_with_time(announcement_time + 1 + STALE_CHANNEL_UPDATE_AGE_LIMIT_SECS);
+ // Make sure removed channels are tracked.
+ assert_eq!(network_graph.removed_channels.lock().unwrap().len(), 1);
+ // Provide a later time so that sufficient time has passed
+ network_graph.remove_stale_channels_and_tracking_with_time(announcement_time + 1 + STALE_CHANNEL_UPDATE_AGE_LIMIT_SECS +
+ REMOVED_ENTRIES_TRACKING_AGE_LIMIT_SECS);
}
assert_eq!(network_graph.read_only().channels().len(), 0);
assert_eq!(network_graph.read_only().nodes().len(), 0);
+ assert!(network_graph.removed_channels.lock().unwrap().is_empty());
#[cfg(feature = "std")]
{
/// Enables the use of the serialization macros for objects that need to be simultaneously decrypted and
/// deserialized. This allows us to avoid an intermediate Vec allocation.
pub(crate) struct ChaChaPolyReadAdapter<R: Readable> {
- #[allow(unused)] // This will be used soon for onion messages
pub readable: R,
}