lightning-custom-message/target
lightning-transaction-sync/target
no-std-check/target
+msrv-no-dev-deps-check/target
+# 0.0.121 - Jan 22, 2024 - "Unwraps are Bad"
+
+## Bug Fixes
+ * Fix a deadlock when calling `batch_funding_transaction_generated` with
+ invalid input (#2841).
+
+## Security
+0.0.121 fixes a denial-of-service vulnerability which is reachable from
+untrusted input from peers in rare cases if we have a public channel or in
+common cases if `P2PGossipSync` is used.
+ * A peer that failed to complete its handshake would cause a reachable
+ `unwrap` in LDK since 0.0.119 when LDK attempts to broadcast gossip to all
+ peers (#2842).
+
+In total, this release features 4 files changed, 52 insertions, 10
+deletions in 4 commits from 2 authors, in alphabetical order:
+ * Jeffrey Czyz
+ * Matt Corallo
+
+# 0.0.120 - Jan 17, 2024 - "Unblinded Fuzzers"
+
+## API Updates
+ * The `PeerManager` bound on `UtxoLookup` was removed entirely. This enables
+ use of `UtxoLookup` in cases broken in 0.0.119 by #2773 (#2822).
+ * LDK now exposes and fully implements the route blinding feature (#2812).
+ * The `lightning-transaction-sync` crate no longer relies on system time
+ without the `time` feature (#2799, #2817).
+ * `lightning::onion_message`'s module layout has changed (#2821).
+ * `Event::ChannelClosed` now includes the `channel_funding_txo` (#2800).
+ * `CandidateRouteHop` variants were destructured into individual structs,
+ hiding some fields which were not generally consumable (#2802).
+
+## Bug Fixes
+ * Fixed a rare issue where `lightning-net-tokio` may not fully flush its send
+ buffer, leading to connection hangs (#2832).
+ * Fixed a panic which may occur when connecting to a peer if we opened a second
+ channel with that peer while they were disconnected (#2808).
+ * Retries for a payment which previously failed in a blinded path will now
+ always use an alternative blinded path (#2818).
+ * `Feature`'s `Eq` and `Hash` implementations now ignore dummy bytes (#2808).
+ * Some missing `DiscardFunding` or `ChannelClosed` events are now generated in
+ rare funding-related failures (#2809).
+ * Fixed a privacy issue in blinded path generation where the real
+ `cltv_expiry_delta` would be exposed to senders (#2831).
+
+## Security
+0.0.120 fixes a denial-of-service vulnerability which is reachable from
+untrusted input from peers if the `UserConfig::manually_accept_inbound_channels`
+option is enabled.
+ * A peer that sent an `open_channel` message with the `channel_type` field
+ unfilled would trigger a reachable `unwrap` since LDK 0.0.117 (#2808).
+ * In protocols where a funding output is shared with our counterparty before
+ it is given to LDK, a malicious peer could have caused a reachable panic
+   by reusing the same funding info (#2809).
+
+In total, this release features 67 files changed, 3016 insertions, 2473
+deletions in 79 commits from 9 authors, in alphabetical order:
+ * Elias Rohrer
+ * Jeffrey Czyz
+ * José A.P
+ * Matt Corallo
+ * Tibo-lg
+ * Valentine Wallace
+ * benthecarman
+ * optout
+ * shuoer86
+
# 0.0.119 - Dec 15, 2023 - "Spring Cleaning for Christmas"
## API Updates
--------
Security is the primary focus of `rust-lightning`; disclosure of security
-vulnerabilites helps prevent user loss of funds. If you believe a vulnerability
+vulnerabilities helps prevent user loss of funds. If you believe a vulnerability
may affect other Lightning implementations, please inform them.
You can find further information on submitting (possible) vulnerabilities in the
use lightning::sign::{KeyMaterial, InMemorySigner, Recipient, EntropySource, NodeSigner, SignerProvider};
use lightning::events;
use lightning::events::MessageSendEventsProvider;
-use lightning::ln::{PaymentHash, PaymentPreimage, PaymentSecret};
+use lightning::ln::{ChannelId, PaymentHash, PaymentPreimage, PaymentSecret};
use lightning::ln::channelmanager::{ChainParameters, ChannelDetails, ChannelManager, PaymentSendFailure, ChannelManagerReadArgs, PaymentId, RecipientOnionFields};
use lightning::ln::channel::FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
use lightning::ln::msgs::{self, CommitmentUpdate, ChannelMessageHandler, DecodeError, UpdateAddHTLC, Init};
use lightning::ln::functional_test_utils::*;
use lightning::offers::invoice::{BlindedPayInfo, UnsignedBolt12Invoice};
use lightning::offers::invoice_request::UnsignedInvoiceRequest;
-use lightning::onion_message::{Destination, MessageRouter, OnionMessagePath};
+use lightning::onion_message::messenger::{Destination, MessageRouter, OnionMessagePath};
use lightning::util::test_channel_signer::{TestChannelSigner, EnforcementState};
use lightning::util::errors::APIError;
use lightning::util::logger::Logger;
self.chain_monitor.update_channel(funding_txo, update)
}
- fn release_pending_monitor_events(&self) -> Vec<(OutPoint, Vec<MonitorEvent>, Option<PublicKey>)> {
+ fn release_pending_monitor_events(&self) -> Vec<(OutPoint, ChannelId, Vec<MonitorEvent>, Option<PublicKey>)> {
return self.chain_monitor.release_pending_monitor_events();
}
}
},
0x89 => { fee_est_c.ret_val.store(253, atomic::Ordering::Release); nodes[2].maybe_update_chan_fees(); },
+ 0xf0 => {
+ let pending_updates = monitor_a.chain_monitor.list_pending_monitor_updates().remove(&chan_1_funding).unwrap();
+ if let Some(id) = pending_updates.get(0) {
+ monitor_a.chain_monitor.channel_monitor_updated(chan_1_funding, *id).unwrap();
+ }
+ nodes[0].process_monitor_events();
+ }
+ 0xf1 => {
+ let pending_updates = monitor_a.chain_monitor.list_pending_monitor_updates().remove(&chan_1_funding).unwrap();
+ if let Some(id) = pending_updates.get(1) {
+ monitor_a.chain_monitor.channel_monitor_updated(chan_1_funding, *id).unwrap();
+ }
+ nodes[0].process_monitor_events();
+ }
+ 0xf2 => {
+ let pending_updates = monitor_a.chain_monitor.list_pending_monitor_updates().remove(&chan_1_funding).unwrap();
+ if let Some(id) = pending_updates.last() {
+ monitor_a.chain_monitor.channel_monitor_updated(chan_1_funding, *id).unwrap();
+ }
+ nodes[0].process_monitor_events();
+ }
+
+ 0xf4 => {
+ let pending_updates = monitor_b.chain_monitor.list_pending_monitor_updates().remove(&chan_1_funding).unwrap();
+ if let Some(id) = pending_updates.get(0) {
+ monitor_b.chain_monitor.channel_monitor_updated(chan_1_funding, *id).unwrap();
+ }
+ nodes[1].process_monitor_events();
+ }
+ 0xf5 => {
+ let pending_updates = monitor_b.chain_monitor.list_pending_monitor_updates().remove(&chan_1_funding).unwrap();
+ if let Some(id) = pending_updates.get(1) {
+ monitor_b.chain_monitor.channel_monitor_updated(chan_1_funding, *id).unwrap();
+ }
+ nodes[1].process_monitor_events();
+ }
+ 0xf6 => {
+ let pending_updates = monitor_b.chain_monitor.list_pending_monitor_updates().remove(&chan_1_funding).unwrap();
+ if let Some(id) = pending_updates.last() {
+ monitor_b.chain_monitor.channel_monitor_updated(chan_1_funding, *id).unwrap();
+ }
+ nodes[1].process_monitor_events();
+ }
+
+ 0xf8 => {
+ let pending_updates = monitor_b.chain_monitor.list_pending_monitor_updates().remove(&chan_2_funding).unwrap();
+ if let Some(id) = pending_updates.get(0) {
+ monitor_b.chain_monitor.channel_monitor_updated(chan_2_funding, *id).unwrap();
+ }
+ nodes[1].process_monitor_events();
+ }
+ 0xf9 => {
+ let pending_updates = monitor_b.chain_monitor.list_pending_monitor_updates().remove(&chan_2_funding).unwrap();
+ if let Some(id) = pending_updates.get(1) {
+ monitor_b.chain_monitor.channel_monitor_updated(chan_2_funding, *id).unwrap();
+ }
+ nodes[1].process_monitor_events();
+ }
+ 0xfa => {
+ let pending_updates = monitor_b.chain_monitor.list_pending_monitor_updates().remove(&chan_2_funding).unwrap();
+ if let Some(id) = pending_updates.last() {
+ monitor_b.chain_monitor.channel_monitor_updated(chan_2_funding, *id).unwrap();
+ }
+ nodes[1].process_monitor_events();
+ }
+
+ 0xfc => {
+ let pending_updates = monitor_c.chain_monitor.list_pending_monitor_updates().remove(&chan_2_funding).unwrap();
+ if let Some(id) = pending_updates.get(0) {
+ monitor_c.chain_monitor.channel_monitor_updated(chan_2_funding, *id).unwrap();
+ }
+ nodes[2].process_monitor_events();
+ }
+ 0xfd => {
+ let pending_updates = monitor_c.chain_monitor.list_pending_monitor_updates().remove(&chan_2_funding).unwrap();
+ if let Some(id) = pending_updates.get(1) {
+ monitor_c.chain_monitor.channel_monitor_updated(chan_2_funding, *id).unwrap();
+ }
+ nodes[2].process_monitor_events();
+ }
+ 0xfe => {
+ let pending_updates = monitor_c.chain_monitor.list_pending_monitor_updates().remove(&chan_2_funding).unwrap();
+ if let Some(id) = pending_updates.last() {
+ monitor_c.chain_monitor.channel_monitor_updated(chan_2_funding, *id).unwrap();
+ }
+ nodes[2].process_monitor_events();
+ }
+
0xff => {
// Test that no channel is in a stuck state where neither party can send funds even
// after we resolve all pending events.
use bitcoin::network::constants::Network;
use bitcoin::hashes::hex::FromHex;
-use bitcoin::hashes::Hash as TraitImport;
-use bitcoin::hashes::HashEngine as TraitImportEngine;
+use bitcoin::hashes::Hash as _;
use bitcoin::hashes::sha256::Hash as Sha256;
use bitcoin::hashes::sha256d::Hash as Sha256dHash;
use bitcoin::hash_types::{Txid, BlockHash, WPubkeyHash};
use lightning::ln::functional_test_utils::*;
use lightning::offers::invoice::{BlindedPayInfo, UnsignedBolt12Invoice};
use lightning::offers::invoice_request::UnsignedInvoiceRequest;
-use lightning::onion_message::{Destination, MessageRouter, OnionMessagePath};
+use lightning::onion_message::messenger::{Destination, MessageRouter, OnionMessagePath};
use lightning::routing::gossip::{P2PGossipSync, NetworkGraph};
use lightning::routing::utxo::UtxoLookup;
use lightning::routing::router::{InFlightHtlcs, PaymentParameters, Route, RouteParameters, Router};
if let None = loss_detector.txids_confirmed.get(&funding_txid) {
let outpoint = OutPoint { txid: funding_txid, index: 0 };
for chan in channelmanager.list_channels() {
- if chan.channel_id == outpoint.to_channel_id() {
+ if chan.funding_txo == Some(outpoint) {
tx.version += 1;
continue 'search_loop;
}
use lightning::util::test_channel_signer::TestChannelSigner;
use lightning::util::logger::Logger;
use lightning::util::ser::{Readable, Writeable, Writer};
-use lightning::onion_message::{CustomOnionMessageHandler, Destination, MessageRouter, OffersMessage, OffersMessageHandler, OnionMessageContents, OnionMessagePath, OnionMessenger, PendingOnionMessage};
+use lightning::onion_message::messenger::{CustomOnionMessageHandler, Destination, MessageRouter, OnionMessagePath, OnionMessenger, PendingOnionMessage};
+use lightning::onion_message::offers::{OffersMessage, OffersMessageHandler};
+use lightning::onion_message::packet::OnionMessageContents;
use crate::utils::test_logger;
[package]
name = "lightning-background-processor"
-version = "0.0.119"
+version = "0.0.121"
authors = ["Valentine Wallace <vwallace@protonmail.com>"]
license = "MIT OR Apache-2.0"
repository = "https://github.com/lightningdevkit/rust-lightning"
[dependencies]
bitcoin = { version = "0.30.2", default-features = false }
-lightning = { version = "0.0.119", path = "../lightning", default-features = false }
-lightning-rapid-gossip-sync = { version = "0.0.119", path = "../lightning-rapid-gossip-sync", default-features = false }
+lightning = { version = "0.0.121", path = "../lightning", default-features = false }
+lightning-rapid-gossip-sync = { version = "0.0.121", path = "../lightning-rapid-gossip-sync", default-features = false }
[dev-dependencies]
tokio = { version = "1.35", features = [ "macros", "rt", "rt-multi-thread", "sync", "time" ] }
-lightning = { version = "0.0.119", path = "../lightning", features = ["_test_utils"] }
-lightning-invoice = { version = "0.27.0", path = "../lightning-invoice" }
-lightning-persister = { version = "0.0.119", path = "../lightning-persister" }
+lightning = { version = "0.0.121", path = "../lightning", features = ["_test_utils"] }
+lightning-invoice = { version = "0.29.0", path = "../lightning-invoice" }
+lightning-persister = { version = "0.0.121", path = "../lightning-persister" }
use lightning::chain::transaction::OutPoint;
use lightning::events::{Event, PathFailure, MessageSendEventsProvider, MessageSendEvent};
use lightning::{get_event_msg, get_event};
- use lightning::ln::PaymentHash;
+ use lightning::ln::{PaymentHash, ChannelId};
use lightning::ln::channelmanager;
use lightning::ln::channelmanager::{BREAKDOWN_TIMEOUT, ChainParameters, MIN_CLTV_EXPIRY_DELTA, PaymentId};
use lightning::ln::features::{ChannelFeatures, NodeFeatures};
}
// Force-close the channel.
- nodes[0].node.force_close_broadcasting_latest_txn(&OutPoint { txid: tx.txid(), index: 0 }.to_channel_id(), &nodes[1].node.get_our_node_id()).unwrap();
+ nodes[0].node.force_close_broadcasting_latest_txn(&ChannelId::v1_from_funding_outpoint(OutPoint { txid: tx.txid(), index: 0 }), &nodes[1].node.get_our_node_id()).unwrap();
// Check that the force-close updates are persisted.
check_persisted_data!(nodes[0].node, filepath.clone());
[package]
name = "lightning-block-sync"
-version = "0.0.119"
+version = "0.0.121"
authors = ["Jeffrey Czyz", "Matt Corallo"]
license = "MIT OR Apache-2.0"
repository = "https://github.com/lightningdevkit/rust-lightning"
[dependencies]
bitcoin = "0.30.2"
hex = { package = "hex-conservative", version = "0.1.1", default-features = false }
-lightning = { version = "0.0.119", path = "../lightning" }
+lightning = { version = "0.0.121", path = "../lightning" }
tokio = { version = "1.35", features = [ "io-util", "net", "time", "rt" ], optional = true }
serde_json = { version = "1.0", optional = true }
chunked_transfer = { version = "1.4", optional = true }
[dev-dependencies]
-lightning = { version = "0.0.119", path = "../lightning", features = ["_test_utils"] }
+lightning = { version = "0.0.121", path = "../lightning", features = ["_test_utils"] }
tokio = { version = "1.35", features = [ "macros", "rt" ] }
pub struct GossipVerifier<S: FutureSpawner,
Blocks: Deref + Send + Sync + 'static + Clone,
L: Deref + Send + Sync + 'static,
- APM: Deref + Send + Sync + 'static + Clone,
> where
Blocks::Target: UtxoSource,
L::Target: Logger,
- APM::Target: APeerManager,
{
source: Blocks,
- peer_manager: APM,
+ peer_manager_wake: Arc<dyn Fn() + Send + Sync>,
gossiper: Arc<P2PGossipSync<Arc<NetworkGraph<L>>, Self, L>>,
spawn: S,
block_cache: Arc<Mutex<VecDeque<(u32, Block)>>>,
impl<S: FutureSpawner,
Blocks: Deref + Send + Sync + Clone,
L: Deref + Send + Sync,
- APM: Deref + Send + Sync + Clone,
-> GossipVerifier<S, Blocks, L, APM> where
+> GossipVerifier<S, Blocks, L> where
Blocks::Target: UtxoSource,
L::Target: Logger,
- APM::Target: APeerManager,
{
/// Constructs a new [`GossipVerifier`].
///
/// This is expected to be given to a [`P2PGossipSync`] (initially constructed with `None` for
/// the UTXO lookup) via [`P2PGossipSync::add_utxo_lookup`].
- pub fn new(source: Blocks, spawn: S, gossiper: Arc<P2PGossipSync<Arc<NetworkGraph<L>>, Self, L>>, peer_manager: APM) -> Self {
+ pub fn new<APM: Deref + Send + Sync + Clone + 'static>(
+ source: Blocks, spawn: S, gossiper: Arc<P2PGossipSync<Arc<NetworkGraph<L>>, Self, L>>, peer_manager: APM
+ ) -> Self where APM::Target: APeerManager {
+ let peer_manager_wake = Arc::new(move || peer_manager.as_ref().process_events());
Self {
- source, spawn, gossiper, peer_manager,
+ source, spawn, gossiper, peer_manager_wake,
block_cache: Arc::new(Mutex::new(VecDeque::with_capacity(BLOCK_CACHE_SIZE))),
}
}
impl<S: FutureSpawner,
Blocks: Deref + Send + Sync + Clone,
L: Deref + Send + Sync,
- APM: Deref + Send + Sync + Clone,
-> Deref for GossipVerifier<S, Blocks, L, APM> where
+> Deref for GossipVerifier<S, Blocks, L> where
Blocks::Target: UtxoSource,
L::Target: Logger,
- APM::Target: APeerManager,
{
type Target = Self;
fn deref(&self) -> &Self { self }
impl<S: FutureSpawner,
Blocks: Deref + Send + Sync + Clone,
L: Deref + Send + Sync,
- APM: Deref + Send + Sync + Clone,
-> UtxoLookup for GossipVerifier<S, Blocks, L, APM> where
+> UtxoLookup for GossipVerifier<S, Blocks, L> where
Blocks::Target: UtxoSource,
L::Target: Logger,
- APM::Target: APeerManager,
{
fn get_utxo(&self, _chain_hash: &ChainHash, short_channel_id: u64) -> UtxoResult {
let res = UtxoFuture::new();
let source = self.source.clone();
let gossiper = Arc::clone(&self.gossiper);
let block_cache = Arc::clone(&self.block_cache);
- let pm = self.peer_manager.clone();
+ let pmw = Arc::clone(&self.peer_manager_wake);
self.spawn.spawn(async move {
let res = Self::retrieve_utxo(source, block_cache, short_channel_id).await;
fut.resolve(gossiper.network_graph(), &*gossiper, res);
- pm.as_ref().process_events();
+ (pmw)();
});
UtxoResult::Async(res)
}
[package]
name = "lightning-custom-message"
-version = "0.0.119"
+version = "0.0.121"
authors = ["Jeffrey Czyz"]
license = "MIT OR Apache-2.0"
repository = "https://github.com/lightningdevkit/rust-lightning"
[dependencies]
bitcoin = "0.30.2"
-lightning = { version = "0.0.119", path = "../lightning" }
+lightning = { version = "0.0.121", path = "../lightning" }
[package]
name = "lightning-invoice"
description = "Data structures to parse and serialize BOLT11 lightning invoices"
-version = "0.27.0"
+version = "0.29.0"
authors = ["Sebastian Geisler <sgeisler@wh2.tu-dresden.de>"]
documentation = "https://docs.rs/lightning-invoice/"
license = "MIT OR Apache-2.0"
[dependencies]
bech32 = { version = "0.9.0", default-features = false }
-lightning = { version = "0.0.119", path = "../lightning", default-features = false }
+lightning = { version = "0.0.121", path = "../lightning", default-features = false }
secp256k1 = { version = "0.27.0", default-features = false, features = ["recovery", "alloc"] }
num-traits = { version = "0.2.8", default-features = false }
hashbrown = { version = "0.8", optional = true }
bitcoin = { version = "0.30.2", default-features = false }
[dev-dependencies]
-lightning = { version = "0.0.119", path = "../lightning", default-features = false, features = ["_test_utils"] }
+lightning = { version = "0.0.121", path = "../lightning", default-features = false, features = ["_test_utils"] }
hex = { package = "hex-conservative", version = "0.1.1", default-features = false }
serde_json = { version = "1"}
}
// Combine all bits from buffer with enough bits from this rounds byte so that they fill
- // a u5. Save reamining bits from byte to buffer.
+ // a u5. Save remaining bits from byte to buffer.
let from_buffer = self.buffer >> 3;
let from_byte = byte >> (3 + self.buffer_bits); // buffer_bits <= 4
[package]
name = "lightning-net-tokio"
-version = "0.0.119"
+version = "0.0.121"
authors = ["Matt Corallo"]
license = "MIT OR Apache-2.0"
repository = "https://github.com/lightningdevkit/rust-lightning/"
[dependencies]
bitcoin = "0.30.2"
-lightning = { version = "0.0.119", path = "../lightning" }
+lightning = { version = "0.0.121", path = "../lightning" }
tokio = { version = "1.35", features = [ "rt", "sync", "net", "time" ] }
[dev-dependencies]
tokio = { version = "1.35", features = [ "macros", "rt", "rt-multi-thread", "sync", "net", "time" ] }
-lightning = { version = "0.0.119", path = "../lightning", features = ["_test_utils"] }
+lightning = { version = "0.0.121", path = "../lightning", features = ["_test_utils"] }
written_len += res;
if written_len == data.len() { return written_len; }
},
+ Err(ref e) if e.kind() == std::io::ErrorKind::WouldBlock => {
+ continue;
+ }
Err(_) => return written_len,
}
},
[package]
name = "lightning-persister"
-version = "0.0.119"
+version = "0.0.121"
authors = ["Valentine Wallace", "Matt Corallo"]
license = "MIT OR Apache-2.0"
repository = "https://github.com/lightningdevkit/rust-lightning"
[dependencies]
bitcoin = "0.30.2"
-lightning = { version = "0.0.119", path = "../lightning" }
+lightning = { version = "0.0.121", path = "../lightning" }
[target.'cfg(windows)'.dependencies]
windows-sys = { version = "0.48.0", default-features = false, features = ["Win32_Storage_FileSystem", "Win32_Foundation"] }
criterion = { version = "0.4", optional = true, default-features = false }
[dev-dependencies]
-lightning = { version = "0.0.119", path = "../lightning", features = ["_test_utils"] }
+lightning = { version = "0.0.121", path = "../lightning", features = ["_test_utils"] }
bitcoin = { version = "0.30.2", default-features = false }
check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 100000);
let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
let update_map = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap();
- let update_id = update_map.get(&added_monitors[0].0.to_channel_id()).unwrap();
+ let update_id = update_map.get(&added_monitors[0].1.channel_id()).unwrap();
// Set the store's directory to read-only, which should result in
// returning an unrecoverable failure when we then attempt to persist a
check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 100000);
let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
let update_map = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap();
- let update_id = update_map.get(&added_monitors[0].0.to_channel_id()).unwrap();
+ let update_id = update_map.get(&added_monitors[0].1.channel_id()).unwrap();
// Create the store with an invalid directory name and test that the
// channel fails to open because the directories fail to be created. There
[package]
name = "lightning-rapid-gossip-sync"
-version = "0.0.119"
+version = "0.0.121"
authors = ["Arik Sosman <git@arik.io>"]
license = "MIT OR Apache-2.0"
repository = "https://github.com/lightningdevkit/rust-lightning"
std = ["lightning/std"]
[dependencies]
-lightning = { version = "0.0.119", path = "../lightning", default-features = false }
+lightning = { version = "0.0.121", path = "../lightning", default-features = false }
bitcoin = { version = "0.30.2", default-features = false }
[target.'cfg(ldk_bench)'.dependencies]
criterion = { version = "0.4", optional = true, default-features = false }
[dev-dependencies]
-lightning = { version = "0.0.119", path = "../lightning", features = ["_test_utils"] }
+lightning = { version = "0.0.121", path = "../lightning", features = ["_test_utils"] }
[package]
name = "lightning-transaction-sync"
-version = "0.0.119"
+version = "0.0.121"
authors = ["Elias Rohrer"]
license = "MIT OR Apache-2.0"
repository = "https://github.com/lightningdevkit/rust-lightning"
async-interface = []
[dependencies]
-lightning = { version = "0.0.119", path = "../lightning", default-features = false, features = ["std"] }
+lightning = { version = "0.0.121", path = "../lightning", default-features = false, features = ["std"] }
bitcoin = { version = "0.30.2", default-features = false }
bdk-macros = "0.6"
futures = { version = "0.3", optional = true }
electrum-client = { version = "0.18.0", optional = true }
[dev-dependencies]
-lightning = { version = "0.0.119", path = "../lightning", default-features = false, features = ["std", "_test_utils"] }
+lightning = { version = "0.0.121", path = "../lightning", default-features = false, features = ["std", "_test_utils"] }
tokio = { version = "1.35.0", features = ["full"] }
[target.'cfg(not(no_download))'.dev-dependencies]
[package]
name = "lightning"
-version = "0.0.119"
+version = "0.0.121"
authors = ["Matt Corallo"]
license = "MIT OR Apache-2.0"
repository = "https://github.com/lightningdevkit/rust-lightning/"
use crate::io;
use crate::io::Cursor;
use crate::ln::onion_utils;
-use crate::onion_message::ControlTlvs;
+use crate::onion_message::packet::ControlTlvs;
use crate::prelude::*;
use crate::sign::{NodeSigner, Recipient};
-use crate::util::chacha20poly1305rfc::ChaChaPolyReadAdapter;
+use crate::crypto::streams::ChaChaPolyReadAdapter;
use crate::util::ser::{FixedLengthReader, LengthReadableArgs, Writeable, Writer};
use core::mem;
pub htlc_minimum_msat: u64,
}
-impl From<CounterpartyForwardingInfo> for PaymentRelay {
- fn from(info: CounterpartyForwardingInfo) -> Self {
+impl TryFrom<CounterpartyForwardingInfo> for PaymentRelay {
+ type Error = ();
+
+ fn try_from(info: CounterpartyForwardingInfo) -> Result<Self, ()> {
let CounterpartyForwardingInfo {
fee_base_msat, fee_proportional_millionths, cltv_expiry_delta
} = info;
- Self { cltv_expiry_delta, fee_proportional_millionths, fee_base_msat }
+
+ // Avoid exposing esoteric CLTV expiry deltas
+ let cltv_expiry_delta = match cltv_expiry_delta {
+ 0..=40 => 40,
+ 41..=80 => 80,
+ 81..=144 => 144,
+ 145..=216 => 216,
+ _ => return Err(()),
+ };
+
+ Ok(Self { cltv_expiry_delta, fee_proportional_millionths, fee_base_msat })
}
}
use super::{BlindedHop, BlindedPath};
use crate::ln::msgs::DecodeError;
use crate::ln::onion_utils;
-use crate::onion_message::Destination;
-use crate::util::chacha20poly1305rfc::ChaChaPolyWriteAdapter;
+use crate::onion_message::messenger::Destination;
+use crate::crypto::streams::ChaChaPolyWriteAdapter;
use crate::util::ser::{Readable, Writeable};
use crate::io;
use crate::chain::chaininterface::{BroadcasterInterface, FeeEstimator};
use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, Balance, MonitorEvent, TransactionOutputs, WithChannelMonitor, LATENCY_GRACE_PERIOD_BLOCKS};
use crate::chain::transaction::{OutPoint, TransactionData};
+use crate::ln::ChannelId;
use crate::sign::ecdsa::WriteableEcdsaChannelSigner;
use crate::events;
use crate::events::{Event, EventHandler};
///
/// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
/// [`Writeable::write`]: crate::util::ser::Writeable::write
- fn persist_new_channel(&self, channel_id: OutPoint, data: &ChannelMonitor<ChannelSigner>, update_id: MonitorUpdateId) -> ChannelMonitorUpdateStatus;
+ fn persist_new_channel(&self, channel_funding_outpoint: OutPoint, data: &ChannelMonitor<ChannelSigner>, update_id: MonitorUpdateId) -> ChannelMonitorUpdateStatus;
/// Update one channel's data. The provided [`ChannelMonitor`] has already applied the given
/// update.
/// [`ChannelMonitorUpdateStatus`] for requirements when returning errors.
///
/// [`Writeable::write`]: crate::util::ser::Writeable::write
- fn update_persisted_channel(&self, channel_id: OutPoint, update: Option<&ChannelMonitorUpdate>, data: &ChannelMonitor<ChannelSigner>, update_id: MonitorUpdateId) -> ChannelMonitorUpdateStatus;
+ fn update_persisted_channel(&self, channel_funding_outpoint: OutPoint, update: Option<&ChannelMonitorUpdate>, data: &ChannelMonitor<ChannelSigner>, update_id: MonitorUpdateId) -> ChannelMonitorUpdateStatus;
}
struct MonitorHolder<ChannelSigner: WriteableEcdsaChannelSigner> {
persister: P,
/// "User-provided" (ie persistence-completion/-failed) [`MonitorEvent`]s. These came directly
/// from the user and not from a [`ChannelMonitor`].
- pending_monitor_events: Mutex<Vec<(OutPoint, Vec<MonitorEvent>, Option<PublicKey>)>>,
+ pending_monitor_events: Mutex<Vec<(OutPoint, ChannelId, Vec<MonitorEvent>, Option<PublicKey>)>>,
/// The best block height seen, used as a proxy for the passage of time.
highest_chain_height: AtomicUsize,
}
}
- /// Lists the funding outpoint of each [`ChannelMonitor`] being monitored.
+ /// Lists the funding outpoint and channel ID of each [`ChannelMonitor`] being monitored.
///
/// Note that [`ChannelMonitor`]s are not removed when a channel is closed as they are always
/// monitoring for on-chain state resolutions.
- pub fn list_monitors(&self) -> Vec<OutPoint> {
- self.monitors.read().unwrap().keys().map(|outpoint| *outpoint).collect()
+ pub fn list_monitors(&self) -> Vec<(OutPoint, ChannelId)> {
+ self.monitors.read().unwrap().iter().map(|(outpoint, monitor_holder)| {
+ let channel_id = monitor_holder.monitor.channel_id();
+ (*outpoint, channel_id)
+ }).collect()
}
#[cfg(not(c_bindings))]
// Completed event.
return Ok(());
}
- self.pending_monitor_events.lock().unwrap().push((funding_txo, vec![MonitorEvent::Completed {
- funding_txo,
+ let channel_id = monitor_data.monitor.channel_id();
+ self.pending_monitor_events.lock().unwrap().push((funding_txo, channel_id, vec![MonitorEvent::Completed {
+ funding_txo, channel_id,
monitor_update_id: monitor_data.monitor.get_latest_update_id(),
}], monitor_data.monitor.get_counterparty_node_id()));
},
#[cfg(any(test, fuzzing))]
pub fn force_channel_monitor_updated(&self, funding_txo: OutPoint, monitor_update_id: u64) {
let monitors = self.monitors.read().unwrap();
- let counterparty_node_id = monitors.get(&funding_txo).and_then(|m| m.monitor.get_counterparty_node_id());
- self.pending_monitor_events.lock().unwrap().push((funding_txo, vec![MonitorEvent::Completed {
+ let (counterparty_node_id, channel_id) = if let Some(m) = monitors.get(&funding_txo) {
+ (m.monitor.get_counterparty_node_id(), m.monitor.channel_id())
+ } else {
+ (None, ChannelId::v1_from_funding_outpoint(funding_txo))
+ };
+ self.pending_monitor_events.lock().unwrap().push((funding_txo, channel_id, vec![MonitorEvent::Completed {
funding_txo,
+ channel_id,
monitor_update_id,
}], counterparty_node_id));
self.event_notifier.notify();
}
fn update_channel(&self, funding_txo: OutPoint, update: &ChannelMonitorUpdate) -> ChannelMonitorUpdateStatus {
+ // `ChannelMonitorUpdate`'s `channel_id` is `None` prior to 0.0.121 and all channels in those
+	// versions are V1-established. For 0.0.121+ the `channel_id` field is always `Some`.
+ let channel_id = update.channel_id.unwrap_or(ChannelId::v1_from_funding_outpoint(funding_txo));
// Update the monitor that watches the channel referred to by the given outpoint.
let monitors = self.monitors.read().unwrap();
match monitors.get(&funding_txo) {
None => {
- let logger = WithContext::from(&self.logger, update.counterparty_node_id, Some(funding_txo.to_channel_id()));
+ let logger = WithContext::from(&self.logger, update.counterparty_node_id, Some(channel_id));
log_error!(logger, "Failed to update channel monitor: no such monitor registered");
// We should never ever trigger this from within ChannelManager. Technically a
}
}
- fn release_pending_monitor_events(&self) -> Vec<(OutPoint, Vec<MonitorEvent>, Option<PublicKey>)> {
+ fn release_pending_monitor_events(&self) -> Vec<(OutPoint, ChannelId, Vec<MonitorEvent>, Option<PublicKey>)> {
let mut pending_monitor_events = self.pending_monitor_events.lock().unwrap().split_off(0);
for monitor_state in self.monitors.read().unwrap().values() {
let logger = WithChannelMonitor::from(&self.logger, &monitor_state.monitor);
let monitor_events = monitor_state.monitor.get_and_clear_pending_monitor_events();
if monitor_events.len() > 0 {
let monitor_outpoint = monitor_state.monitor.get_funding_txo().0;
+ let monitor_channel_id = monitor_state.monitor.channel_id();
let counterparty_node_id = monitor_state.monitor.get_counterparty_node_id();
- pending_monitor_events.push((monitor_outpoint, monitor_events, counterparty_node_id));
+ pending_monitor_events.push((monitor_outpoint, monitor_channel_id, monitor_events, counterparty_node_id));
}
}
}
///
/// [`ChannelMonitorUpdateStatus::InProgress`]: super::ChannelMonitorUpdateStatus::InProgress
pub update_id: u64,
+ /// The channel ID associated with these updates.
+ ///
+ /// Will be `None` for `ChannelMonitorUpdate`s constructed on LDK versions prior to 0.0.121 and
+ /// always `Some` otherwise.
+ pub channel_id: Option<ChannelId>,
}
/// The update ID used for a [`ChannelMonitorUpdate`] that is either:
}
write_tlv_fields!(w, {
(1, self.counterparty_node_id, option),
+ (3, self.channel_id, option),
});
Ok(())
}
}
}
let mut counterparty_node_id = None;
+ let mut channel_id = None;
read_tlv_fields!(r, {
(1, counterparty_node_id, option),
+ (3, channel_id, option),
});
- Ok(Self { update_id, counterparty_node_id, updates })
+ Ok(Self { update_id, counterparty_node_id, updates, channel_id })
}
}
Completed {
/// The funding outpoint of the [`ChannelMonitor`] that was updated
funding_txo: OutPoint,
+ /// The channel ID of the channel associated with the [`ChannelMonitor`]
+ channel_id: ChannelId,
/// The Update ID from [`ChannelMonitorUpdate::update_id`] which was applied or
/// [`ChannelMonitor::get_latest_update_id`].
///
(0, Completed) => {
(0, funding_txo, required),
(2, monitor_update_id, required),
+ (4, channel_id, required),
},
;
(2, HTLCEvent),
channel_keys_id: [u8; 32],
holder_revocation_basepoint: RevocationBasepoint,
+ channel_id: ChannelId,
funding_info: (OutPoint, ScriptBuf),
current_counterparty_commitment_txid: Option<Txid>,
prev_counterparty_commitment_txid: Option<Txid>,
(13, self.spendable_txids_confirmed, required_vec),
(15, self.counterparty_fulfilled_htlcs, required),
(17, self.initial_counterparty_commitment_info, option),
+ (19, self.channel_id, required),
});
Ok(())
pub(crate) fn from_impl<S: WriteableEcdsaChannelSigner>(logger: &'a L, monitor_impl: &ChannelMonitorImpl<S>) -> Self {
let peer_id = monitor_impl.counterparty_node_id;
- let channel_id = Some(monitor_impl.funding_info.0.to_channel_id());
+ let channel_id = Some(monitor_impl.channel_id());
WithChannelMonitor {
logger, peer_id, channel_id,
}
funding_redeemscript: ScriptBuf, channel_value_satoshis: u64,
commitment_transaction_number_obscure_factor: u64,
initial_holder_commitment_tx: HolderCommitmentTransaction,
- best_block: BestBlock, counterparty_node_id: PublicKey) -> ChannelMonitor<Signer> {
+ best_block: BestBlock, counterparty_node_id: PublicKey, channel_id: ChannelId,
+ ) -> ChannelMonitor<Signer> {
assert!(commitment_transaction_number_obscure_factor <= (1 << 48));
let counterparty_payment_script = chan_utils::get_counterparty_payment_script(
channel_keys_id,
holder_revocation_basepoint,
+ channel_id,
funding_info,
current_counterparty_commitment_txid: None,
prev_counterparty_commitment_txid: None,
self.inner.lock().unwrap().get_funding_txo().clone()
}
+ /// Gets the channel_id of the channel this ChannelMonitor is monitoring for.
+ pub fn channel_id(&self) -> ChannelId {
+ self.inner.lock().unwrap().channel_id()
+ }
+
/// Gets a list of txids, with their output scripts (in the order they appear in the
/// transaction), which we must learn about spends of via block_connected().
pub fn get_outputs_to_watch(&self) -> Vec<(Txid, Vec<(u32, ScriptBuf)>)> {
self.queue_latest_holder_commitment_txn_for_broadcast(broadcaster, &bounded_fee_estimator, logger);
} else if !self.holder_tx_signed {
log_error!(logger, "WARNING: You have a potentially-unsafe holder commitment transaction available to broadcast");
- log_error!(logger, " in channel monitor for channel {}!", &self.funding_info.0.to_channel_id());
+ log_error!(logger, " in channel monitor for channel {}!", &self.channel_id());
log_error!(logger, " Read the docs for ChannelMonitor::get_latest_holder_commitment_txn and take manual action!");
} else {
// If we generated a MonitorEvent::HolderForceClosed, the ChannelManager
&self.funding_info
}
+ pub fn channel_id(&self) -> ChannelId {
+ self.channel_id
+ }
+
fn get_outputs_to_watch(&self) -> &HashMap<Txid, Vec<(u32, ScriptBuf)>> {
// If we've detected a counterparty commitment tx on chain, we must include it in the set
// of outputs to watch for spends of, otherwise we're likely to lose user funds. Because
if prevout.txid == self.funding_info.0.txid && prevout.vout == self.funding_info.0.index as u32 {
let mut balance_spendable_csv = None;
log_info!(logger, "Channel {} closed by funding output spend in txid {}.",
- &self.funding_info.0.to_channel_id(), txid);
+ &self.channel_id(), txid);
self.funding_spend_seen = true;
let mut commitment_tx_to_counterparty_output = None;
if (tx.input[0].sequence.0 >> 8*3) as u8 == 0x80 && (tx.lock_time.to_consensus_u32() >> 8*3) as u8 == 0x20 {
log_debug!(logger, "Descriptor {} has got enough confirmations to be passed upstream", log_spendable!(descriptor));
self.pending_events.push(Event::SpendableOutputs {
outputs: vec![descriptor],
- channel_id: Some(self.funding_info.0.to_channel_id()),
+ channel_id: Some(self.channel_id()),
});
self.spendable_txids_confirmed.push(entry.txid);
},
let mut spendable_txids_confirmed = Some(Vec::new());
let mut counterparty_fulfilled_htlcs = Some(HashMap::new());
let mut initial_counterparty_commitment_info = None;
+ let mut channel_id = None;
read_tlv_fields!(reader, {
(1, funding_spend_confirmed, option),
(3, htlcs_resolved_on_chain, optional_vec),
(13, spendable_txids_confirmed, optional_vec),
(15, counterparty_fulfilled_htlcs, option),
(17, initial_counterparty_commitment_info, option),
+ (19, channel_id, option),
});
// Monitors for anchor outputs channels opened in v0.0.116 suffered from a bug in which the
channel_keys_id,
holder_revocation_basepoint,
+ channel_id: channel_id.unwrap_or(ChannelId::v1_from_funding_outpoint(outpoint)),
funding_info,
current_counterparty_commitment_txid,
prev_counterparty_commitment_txid,
use crate::chain::package::{weight_offered_htlc, weight_received_htlc, weight_revoked_offered_htlc, weight_revoked_received_htlc, WEIGHT_REVOKED_OUTPUT};
use crate::chain::transaction::OutPoint;
use crate::sign::InMemorySigner;
- use crate::ln::{PaymentPreimage, PaymentHash};
+ use crate::ln::{PaymentPreimage, PaymentHash, ChannelId};
use crate::ln::channel_keys::{DelayedPaymentBasepoint, DelayedPaymentKey, HtlcBasepoint, RevocationBasepoint, RevocationKey};
use crate::ln::chan_utils::{self,HTLCOutputInCommitment, ChannelPublicKeys, ChannelTransactionParameters, HolderCommitmentTransaction, CounterpartyChannelTransactionParameters};
use crate::ln::channelmanager::{PaymentSendFailure, PaymentId, RecipientOnionFields};
preimages_slice_to_htlcs!($preimages_slice).into_iter().map(|(htlc, _)| (htlc, None)).collect()
}
}
- let dummy_sig = crate::util::crypto::sign(&secp_ctx,
+ let dummy_sig = crate::crypto::utils::sign(&secp_ctx,
&bitcoin::secp256k1::Message::from_slice(&[42; 32]).unwrap(),
&SecretKey::from_slice(&[42; 32]).unwrap());
htlc_basepoint: HtlcBasepoint::from(PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[48; 32]).unwrap()))
};
let funding_outpoint = OutPoint { txid: Txid::all_zeros(), index: u16::max_value() };
+ let channel_id = ChannelId::v1_from_funding_outpoint(funding_outpoint);
let channel_parameters = ChannelTransactionParameters {
holder_pubkeys: keys.holder_channel_pubkeys.clone(),
holder_selected_contest_delay: 66,
Some(ShutdownScript::new_p2wpkh_from_pubkey(shutdown_pubkey).into_inner()), 0, &ScriptBuf::new(),
(OutPoint { txid: Txid::from_slice(&[43; 32]).unwrap(), index: 0 }, ScriptBuf::new()),
&channel_parameters, ScriptBuf::new(), 46, 0, HolderCommitmentTransaction::dummy(&mut Vec::new()),
- best_block, dummy_key);
+ best_block, dummy_key, channel_id);
let mut htlcs = preimages_slice_to_htlcs!(preimages[0..10]);
let dummy_commitment_tx = HolderCommitmentTransaction::dummy(&mut htlcs);
htlc_basepoint: HtlcBasepoint::from(PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[48; 32]).unwrap())),
};
let funding_outpoint = OutPoint { txid: Txid::all_zeros(), index: u16::max_value() };
+ let channel_id = ChannelId::v1_from_funding_outpoint(funding_outpoint);
let channel_parameters = ChannelTransactionParameters {
holder_pubkeys: keys.holder_channel_pubkeys.clone(),
holder_selected_contest_delay: 66,
Some(ShutdownScript::new_p2wpkh_from_pubkey(shutdown_pubkey).into_inner()), 0, &ScriptBuf::new(),
(OutPoint { txid: Txid::from_slice(&[43; 32]).unwrap(), index: 0 }, ScriptBuf::new()),
&channel_parameters, ScriptBuf::new(), 46, 0, HolderCommitmentTransaction::dummy(&mut Vec::new()),
- best_block, dummy_key);
+ best_block, dummy_key, channel_id);
- let chan_id = monitor.inner.lock().unwrap().funding_info.0.to_channel_id().clone();
+ let chan_id = monitor.inner.lock().unwrap().channel_id();
let context_logger = WithChannelMonitor::from(&logger, &monitor);
log_error!(context_logger, "This is an error");
log_warn!(context_logger, "This is an error");
use bitcoin::secp256k1::PublicKey;
use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, MonitorEvent};
+use crate::ln::ChannelId;
use crate::sign::ecdsa::WriteableEcdsaChannelSigner;
use crate::chain::transaction::{OutPoint, TransactionData};
///
/// For details on asynchronous [`ChannelMonitor`] updating and returning
/// [`MonitorEvent::Completed`] here, see [`ChannelMonitorUpdateStatus::InProgress`].
- fn release_pending_monitor_events(&self) -> Vec<(OutPoint, Vec<MonitorEvent>, Option<PublicKey>)>;
+ fn release_pending_monitor_events(&self) -> Vec<(OutPoint, ChannelId, Vec<MonitorEvent>, Option<PublicKey>)>;
}
/// The `Filter` trait defines behavior for indicating chain activity of interest pertaining to
//! Types describing on-chain transactions.
-use crate::ln::ChannelId;
use bitcoin::hash_types::Txid;
-use bitcoin::hashes::Hash;
use bitcoin::blockdata::transaction::OutPoint as BitcoinOutPoint;
use bitcoin::blockdata::transaction::Transaction;
}
impl OutPoint {
- /// Convert an `OutPoint` to a lightning channel id.
- pub fn to_channel_id(&self) -> ChannelId {
- ChannelId::v1_from_funding_txid(self.txid.as_byte_array(), self.index)
- }
-
/// Converts this OutPoint into the OutPoint field as used by rust-bitcoin
///
/// This is not exported to bindings users as the same type is used universally in the C bindings
#[cfg(test)]
mod tests {
use crate::chain::transaction::OutPoint;
+ use crate::ln::ChannelId;
use bitcoin::blockdata::transaction::Transaction;
use bitcoin::consensus::encode;
#[test]
fn test_channel_id_calculation() {
let tx: Transaction = encode::deserialize(&<Vec<u8>>::from_hex("020000000001010e0adef48412e4361325ac1c6e36411299ab09d4f083b9d8ddb55fbc06e1b0c00000000000feffffff0220a1070000000000220020f81d95e040bd0a493e38bae27bff52fe2bb58b93b293eb579c01c31b05c5af1dc072cfee54a3000016001434b1d6211af5551905dc2642d05f5b04d25a8fe80247304402207f570e3f0de50546aad25a872e3df059d277e776dda4269fa0d2cc8c2ee6ec9a022054e7fae5ca94d47534c86705857c24ceea3ad51c69dd6051c5850304880fc43a012103cb11a1bacc223d98d91f1946c6752e358a5eb1a1c983b3e6fb15378f453b76bd00000000").unwrap()[..]).unwrap();
- assert_eq!(&OutPoint {
+ assert_eq!(&ChannelId::v1_from_funding_outpoint(OutPoint {
txid: tx.txid(),
index: 0
- }.to_channel_id().0[..], &<Vec<u8>>::from_hex("3e88dd7165faf7be58b3c5bb2c9c452aebef682807ea57080f62e6f6e113c25e").unwrap()[..]);
- assert_eq!(&OutPoint {
+ }).0[..], &<Vec<u8>>::from_hex("3e88dd7165faf7be58b3c5bb2c9c452aebef682807ea57080f62e6f6e113c25e").unwrap()[..]);
+ assert_eq!(&ChannelId::v1_from_funding_outpoint(OutPoint {
txid: tx.txid(),
index: 1
- }.to_channel_id().0[..], &<Vec<u8>>::from_hex("3e88dd7165faf7be58b3c5bb2c9c452aebef682807ea57080f62e6f6e113c25f").unwrap()[..]);
+ }).0[..], &<Vec<u8>>::from_hex("3e88dd7165faf7be58b3c5bb2c9c452aebef682807ea57080f62e6f6e113c25f").unwrap()[..]);
}
}
--- /dev/null
+// This file was stolen from rust-crypto.
+// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
+// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
+// You may not use this file except in accordance with one or both of these
+// licenses.
+
+#[cfg(not(fuzzing))]
+mod real_chacha {
+ use core::cmp;
+ use core::convert::TryInto;
+
+ #[derive(Clone, Copy, PartialEq, Eq)]
+ #[allow(non_camel_case_types)]
+ struct u32x4(pub u32, pub u32, pub u32, pub u32);
+ impl ::core::ops::Add for u32x4 {
+ type Output = u32x4;
+ #[inline]
+ fn add(self, rhs: u32x4) -> u32x4 {
+ u32x4(self.0.wrapping_add(rhs.0),
+ self.1.wrapping_add(rhs.1),
+ self.2.wrapping_add(rhs.2),
+ self.3.wrapping_add(rhs.3))
+ }
+ }
+ impl ::core::ops::Sub for u32x4 {
+ type Output = u32x4;
+ #[inline]
+ fn sub(self, rhs: u32x4) -> u32x4 {
+ u32x4(self.0.wrapping_sub(rhs.0),
+ self.1.wrapping_sub(rhs.1),
+ self.2.wrapping_sub(rhs.2),
+ self.3.wrapping_sub(rhs.3))
+ }
+ }
+ impl ::core::ops::BitXor for u32x4 {
+ type Output = u32x4;
+ #[inline]
+ fn bitxor(self, rhs: u32x4) -> u32x4 {
+ u32x4(self.0 ^ rhs.0, self.1 ^ rhs.1, self.2 ^ rhs.2, self.3 ^ rhs.3)
+ }
+ }
+ impl ::core::ops::Shr<u8> for u32x4 {
+ type Output = u32x4;
+ #[inline]
+ fn shr(self, shr: u8) -> u32x4 {
+ u32x4(self.0 >> shr, self.1 >> shr, self.2 >> shr, self.3 >> shr)
+ }
+ }
+ impl ::core::ops::Shl<u8> for u32x4 {
+ type Output = u32x4;
+ #[inline]
+ fn shl(self, shl: u8) -> u32x4 {
+ u32x4(self.0 << shl, self.1 << shl, self.2 << shl, self.3 << shl)
+ }
+ }
+ impl u32x4 {
+ #[inline]
+ fn from_bytes(bytes: &[u8]) -> Self {
+ assert_eq!(bytes.len(), 4*4);
+ Self (
+ u32::from_le_bytes(bytes[0*4..1*4].try_into().expect("len is 4")),
+ u32::from_le_bytes(bytes[1*4..2*4].try_into().expect("len is 4")),
+ u32::from_le_bytes(bytes[2*4..3*4].try_into().expect("len is 4")),
+ u32::from_le_bytes(bytes[3*4..4*4].try_into().expect("len is 4")),
+ )
+ }
+ }
+
+ const BLOCK_SIZE: usize = 64;
+
+ #[derive(Clone,Copy)]
+ struct ChaChaState {
+ a: u32x4,
+ b: u32x4,
+ c: u32x4,
+ d: u32x4
+ }
+
+ #[derive(Copy)]
+ pub struct ChaCha20 {
+ state : ChaChaState,
+ output : [u8; BLOCK_SIZE],
+ offset : usize,
+ }
+
+ impl Clone for ChaCha20 { fn clone(&self) -> ChaCha20 { *self } }
+
+ macro_rules! swizzle {
+ ($b: expr, $c: expr, $d: expr) => {{
+ let u32x4(b10, b11, b12, b13) = $b;
+ $b = u32x4(b11, b12, b13, b10);
+ let u32x4(c10, c11, c12, c13) = $c;
+ $c = u32x4(c12, c13,c10, c11);
+ let u32x4(d10, d11, d12, d13) = $d;
+ $d = u32x4(d13, d10, d11, d12);
+ }}
+ }
+
+ macro_rules! state_to_buffer {
+ ($state: expr, $output: expr) => {{
+ let u32x4(a1, a2, a3, a4) = $state.a;
+ let u32x4(b1, b2, b3, b4) = $state.b;
+ let u32x4(c1, c2, c3, c4) = $state.c;
+ let u32x4(d1, d2, d3, d4) = $state.d;
+ let lens = [
+ a1,a2,a3,a4,
+ b1,b2,b3,b4,
+ c1,c2,c3,c4,
+ d1,d2,d3,d4
+ ];
+ for i in 0..lens.len() {
+ $output[i*4..(i+1)*4].copy_from_slice(&lens[i].to_le_bytes());
+ }
+ }}
+ }
+
+ macro_rules! round{
+ ($state: expr) => {{
+ $state.a = $state.a + $state.b;
+ rotate!($state.d, $state.a, 16);
+ $state.c = $state.c + $state.d;
+ rotate!($state.b, $state.c, 12);
+ $state.a = $state.a + $state.b;
+ rotate!($state.d, $state.a, 8);
+ $state.c = $state.c + $state.d;
+ rotate!($state.b, $state.c, 7);
+ }}
+ }
+
+ macro_rules! rotate {
+ ($a: expr, $b: expr, $rot: expr) => {{
+ let v = $a ^ $b;
+ let r = 32 - $rot;
+ let right = v >> r;
+ $a = (v << $rot) ^ right
+ }}
+ }
+
+ impl ChaCha20 {
+ pub fn new(key: &[u8], nonce: &[u8]) -> ChaCha20 {
+ assert!(key.len() == 16 || key.len() == 32);
+ assert!(nonce.len() == 8 || nonce.len() == 12);
+
+ ChaCha20{ state: ChaCha20::expand(key, nonce), output: [0u8; BLOCK_SIZE], offset: 64 }
+ }
+
+ /// Get one block from a ChaCha stream.
+ pub fn get_single_block(key: &[u8; 32], nonce: &[u8; 16]) -> [u8; 32] {
+ let mut chacha = ChaCha20 { state: ChaCha20::expand(key, nonce), output: [0u8; BLOCK_SIZE], offset: 64 };
+ let mut chacha_bytes = [0; 32];
+ chacha.process_in_place(&mut chacha_bytes);
+ chacha_bytes
+ }
+
+ /// Encrypts `src` into `dest` using a single block from a ChaCha stream. Passing `dest` as
+ /// `src` in a second call will decrypt it.
+ pub fn encrypt_single_block(
+ key: &[u8; 32], nonce: &[u8; 16], dest: &mut [u8], src: &[u8]
+ ) {
+ debug_assert_eq!(dest.len(), src.len());
+ debug_assert!(dest.len() <= 32);
+
+ let block = ChaCha20::get_single_block(key, nonce);
+ for i in 0..dest.len() {
+ dest[i] = block[i] ^ src[i];
+ }
+ }
+
+ /// Same as `encrypt_single_block` only operates on a fixed-size input in-place.
+ pub fn encrypt_single_block_in_place(
+ key: &[u8; 32], nonce: &[u8; 16], bytes: &mut [u8; 32]
+ ) {
+ let block = ChaCha20::get_single_block(key, nonce);
+ for i in 0..bytes.len() {
+ bytes[i] = block[i] ^ bytes[i];
+ }
+ }
+
+ fn expand(key: &[u8], nonce: &[u8]) -> ChaChaState {
+ let constant = match key.len() {
+ 16 => b"expand 16-byte k",
+ 32 => b"expand 32-byte k",
+ _ => unreachable!(),
+ };
+ ChaChaState {
+ a: u32x4::from_bytes(&constant[0..16]),
+ b: u32x4::from_bytes(&key[0..16]),
+ c: if key.len() == 16 {
+ u32x4::from_bytes(&key[0..16])
+ } else {
+ u32x4::from_bytes(&key[16..32])
+ },
+ d: if nonce.len() == 16 {
+ u32x4::from_bytes(&nonce[0..16])
+ } else if nonce.len() == 12 {
+ let mut nonce4 = [0; 4*4];
+ nonce4[4..].copy_from_slice(nonce);
+ u32x4::from_bytes(&nonce4)
+ } else {
+ let mut nonce4 = [0; 4*4];
+ nonce4[8..].copy_from_slice(nonce);
+ u32x4::from_bytes(&nonce4)
+ }
+ }
+ }
+
+	// put the next BLOCK_SIZE keystream bytes into self.output
+ fn update(&mut self) {
+ let mut state = self.state;
+
+ for _ in 0..10 {
+ round!(state);
+ swizzle!(state.b, state.c, state.d);
+ round!(state);
+ swizzle!(state.d, state.c, state.b);
+ }
+ state.a = state.a + self.state.a;
+ state.b = state.b + self.state.b;
+ state.c = state.c + self.state.c;
+ state.d = state.d + self.state.d;
+
+ state_to_buffer!(state, self.output);
+
+ self.state.d = self.state.d + u32x4(1, 0, 0, 0);
+ let u32x4(c12, _, _, _) = self.state.d;
+ if c12 == 0 {
+ // we could increment the other counter word with an 8 byte nonce
+ // but other implementations like boringssl have this same
+ // limitation
+ panic!("counter is exhausted");
+ }
+
+ self.offset = 0;
+ }
+
+ #[inline] // Useful cause input may be 0s on stack that should be optimized out
+ pub fn process(&mut self, input: &[u8], output: &mut [u8]) {
+ assert!(input.len() == output.len());
+ let len = input.len();
+ let mut i = 0;
+ while i < len {
+ // If there is no keystream available in the output buffer,
+ // generate the next block.
+ if self.offset == BLOCK_SIZE {
+ self.update();
+ }
+
+ // Process the min(available keystream, remaining input length).
+ let count = cmp::min(BLOCK_SIZE - self.offset, len - i);
+ // explicitly assert lengths to avoid bounds checks:
+ assert!(output.len() >= i + count);
+ assert!(input.len() >= i + count);
+ assert!(self.output.len() >= self.offset + count);
+ for j in 0..count {
+ output[i + j] = input[i + j] ^ self.output[self.offset + j];
+ }
+ i += count;
+ self.offset += count;
+ }
+ }
+
+ pub fn process_in_place(&mut self, input_output: &mut [u8]) {
+ let len = input_output.len();
+ let mut i = 0;
+ while i < len {
+ // If there is no keystream available in the output buffer,
+ // generate the next block.
+ if self.offset == BLOCK_SIZE {
+ self.update();
+ }
+
+ // Process the min(available keystream, remaining input length).
+ let count = cmp::min(BLOCK_SIZE - self.offset, len - i);
+ // explicitly assert lengths to avoid bounds checks:
+ assert!(input_output.len() >= i + count);
+ assert!(self.output.len() >= self.offset + count);
+ for j in 0..count {
+ input_output[i + j] ^= self.output[self.offset + j];
+ }
+ i += count;
+ self.offset += count;
+ }
+ }
+
+ #[cfg(test)]
+ pub fn seek_to_block(&mut self, block_offset: u32) {
+ self.state.d.0 = block_offset;
+ self.update();
+ }
+ }
+}
+#[cfg(not(fuzzing))]
+pub use self::real_chacha::ChaCha20;
+
+#[cfg(fuzzing)]
+mod fuzzy_chacha {
+ pub struct ChaCha20 {}
+
+ impl ChaCha20 {
+ pub fn new(key: &[u8], nonce: &[u8]) -> ChaCha20 {
+ assert!(key.len() == 16 || key.len() == 32);
+ assert!(nonce.len() == 8 || nonce.len() == 12);
+ Self {}
+ }
+
+ pub fn get_single_block(_key: &[u8; 32], _nonce: &[u8; 16]) -> [u8; 32] {
+ [0; 32]
+ }
+
+ pub fn encrypt_single_block(
+ _key: &[u8; 32], _nonce: &[u8; 16], dest: &mut [u8], src: &[u8]
+ ) {
+ debug_assert_eq!(dest.len(), src.len());
+ debug_assert!(dest.len() <= 32);
+ }
+
+ pub fn encrypt_single_block_in_place(
+ _key: &[u8; 32], _nonce: &[u8; 16], _bytes: &mut [u8; 32]
+ ) {}
+
+ pub fn process(&mut self, input: &[u8], output: &mut [u8]) {
+ output.copy_from_slice(input);
+ }
+
+ pub fn process_in_place(&mut self, _input_output: &mut [u8]) {}
+ }
+}
+#[cfg(fuzzing)]
+pub use self::fuzzy_chacha::ChaCha20;
+
+#[cfg(test)]
+mod test {
+ use alloc::vec;
+ use alloc::vec::{Vec};
+ use core::convert::TryInto;
+ use core::iter::repeat;
+
+ use super::ChaCha20;
+
+ #[test]
+ fn test_chacha20_256_tls_vectors() {
+ struct TestVector {
+ key: [u8; 32],
+ nonce: [u8; 8],
+ keystream: Vec<u8>,
+ }
+ // taken from http://tools.ietf.org/html/draft-agl-tls-chacha20poly1305-04
+ let test_vectors = vec!(
+ TestVector{
+ key: [
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ ],
+ nonce: [ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ],
+ keystream: vec!(
+ 0x76, 0xb8, 0xe0, 0xad, 0xa0, 0xf1, 0x3d, 0x90,
+ 0x40, 0x5d, 0x6a, 0xe5, 0x53, 0x86, 0xbd, 0x28,
+ 0xbd, 0xd2, 0x19, 0xb8, 0xa0, 0x8d, 0xed, 0x1a,
+ 0xa8, 0x36, 0xef, 0xcc, 0x8b, 0x77, 0x0d, 0xc7,
+ 0xda, 0x41, 0x59, 0x7c, 0x51, 0x57, 0x48, 0x8d,
+ 0x77, 0x24, 0xe0, 0x3f, 0xb8, 0xd8, 0x4a, 0x37,
+ 0x6a, 0x43, 0xb8, 0xf4, 0x15, 0x18, 0xa1, 0x1c,
+ 0xc3, 0x87, 0xb6, 0x69, 0xb2, 0xee, 0x65, 0x86,
+ ),
+ }, TestVector{
+ key: [
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
+ ],
+ nonce: [ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ],
+ keystream: vec!(
+ 0x45, 0x40, 0xf0, 0x5a, 0x9f, 0x1f, 0xb2, 0x96,
+ 0xd7, 0x73, 0x6e, 0x7b, 0x20, 0x8e, 0x3c, 0x96,
+ 0xeb, 0x4f, 0xe1, 0x83, 0x46, 0x88, 0xd2, 0x60,
+ 0x4f, 0x45, 0x09, 0x52, 0xed, 0x43, 0x2d, 0x41,
+ 0xbb, 0xe2, 0xa0, 0xb6, 0xea, 0x75, 0x66, 0xd2,
+ 0xa5, 0xd1, 0xe7, 0xe2, 0x0d, 0x42, 0xaf, 0x2c,
+ 0x53, 0xd7, 0x92, 0xb1, 0xc4, 0x3f, 0xea, 0x81,
+ 0x7e, 0x9a, 0xd2, 0x75, 0xae, 0x54, 0x69, 0x63,
+ ),
+ }, TestVector{
+ key: [
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ ],
+ nonce: [ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01 ],
+ keystream: vec!(
+ 0xde, 0x9c, 0xba, 0x7b, 0xf3, 0xd6, 0x9e, 0xf5,
+ 0xe7, 0x86, 0xdc, 0x63, 0x97, 0x3f, 0x65, 0x3a,
+ 0x0b, 0x49, 0xe0, 0x15, 0xad, 0xbf, 0xf7, 0x13,
+ 0x4f, 0xcb, 0x7d, 0xf1, 0x37, 0x82, 0x10, 0x31,
+ 0xe8, 0x5a, 0x05, 0x02, 0x78, 0xa7, 0x08, 0x45,
+ 0x27, 0x21, 0x4f, 0x73, 0xef, 0xc7, 0xfa, 0x5b,
+ 0x52, 0x77, 0x06, 0x2e, 0xb7, 0xa0, 0x43, 0x3e,
+ 0x44, 0x5f, 0x41, 0xe3,
+ ),
+ }, TestVector{
+ key: [
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ ],
+ nonce: [ 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ],
+ keystream: vec!(
+ 0xef, 0x3f, 0xdf, 0xd6, 0xc6, 0x15, 0x78, 0xfb,
+ 0xf5, 0xcf, 0x35, 0xbd, 0x3d, 0xd3, 0x3b, 0x80,
+ 0x09, 0x63, 0x16, 0x34, 0xd2, 0x1e, 0x42, 0xac,
+ 0x33, 0x96, 0x0b, 0xd1, 0x38, 0xe5, 0x0d, 0x32,
+ 0x11, 0x1e, 0x4c, 0xaf, 0x23, 0x7e, 0xe5, 0x3c,
+ 0xa8, 0xad, 0x64, 0x26, 0x19, 0x4a, 0x88, 0x54,
+ 0x5d, 0xdc, 0x49, 0x7a, 0x0b, 0x46, 0x6e, 0x7d,
+ 0x6b, 0xbd, 0xb0, 0x04, 0x1b, 0x2f, 0x58, 0x6b,
+ ),
+ }, TestVector{
+ key: [
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+ ],
+ nonce: [ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07 ],
+ keystream: vec!(
+ 0xf7, 0x98, 0xa1, 0x89, 0xf1, 0x95, 0xe6, 0x69,
+ 0x82, 0x10, 0x5f, 0xfb, 0x64, 0x0b, 0xb7, 0x75,
+ 0x7f, 0x57, 0x9d, 0xa3, 0x16, 0x02, 0xfc, 0x93,
+ 0xec, 0x01, 0xac, 0x56, 0xf8, 0x5a, 0xc3, 0xc1,
+ 0x34, 0xa4, 0x54, 0x7b, 0x73, 0x3b, 0x46, 0x41,
+ 0x30, 0x42, 0xc9, 0x44, 0x00, 0x49, 0x17, 0x69,
+ 0x05, 0xd3, 0xbe, 0x59, 0xea, 0x1c, 0x53, 0xf1,
+ 0x59, 0x16, 0x15, 0x5c, 0x2b, 0xe8, 0x24, 0x1a,
+ 0x38, 0x00, 0x8b, 0x9a, 0x26, 0xbc, 0x35, 0x94,
+ 0x1e, 0x24, 0x44, 0x17, 0x7c, 0x8a, 0xde, 0x66,
+ 0x89, 0xde, 0x95, 0x26, 0x49, 0x86, 0xd9, 0x58,
+ 0x89, 0xfb, 0x60, 0xe8, 0x46, 0x29, 0xc9, 0xbd,
+ 0x9a, 0x5a, 0xcb, 0x1c, 0xc1, 0x18, 0xbe, 0x56,
+ 0x3e, 0xb9, 0xb3, 0xa4, 0xa4, 0x72, 0xf8, 0x2e,
+ 0x09, 0xa7, 0xe7, 0x78, 0x49, 0x2b, 0x56, 0x2e,
+ 0xf7, 0x13, 0x0e, 0x88, 0xdf, 0xe0, 0x31, 0xc7,
+ 0x9d, 0xb9, 0xd4, 0xf7, 0xc7, 0xa8, 0x99, 0x15,
+ 0x1b, 0x9a, 0x47, 0x50, 0x32, 0xb6, 0x3f, 0xc3,
+ 0x85, 0x24, 0x5f, 0xe0, 0x54, 0xe3, 0xdd, 0x5a,
+ 0x97, 0xa5, 0xf5, 0x76, 0xfe, 0x06, 0x40, 0x25,
+ 0xd3, 0xce, 0x04, 0x2c, 0x56, 0x6a, 0xb2, 0xc5,
+ 0x07, 0xb1, 0x38, 0xdb, 0x85, 0x3e, 0x3d, 0x69,
+ 0x59, 0x66, 0x09, 0x96, 0x54, 0x6c, 0xc9, 0xc4,
+ 0xa6, 0xea, 0xfd, 0xc7, 0x77, 0xc0, 0x40, 0xd7,
+ 0x0e, 0xaf, 0x46, 0xf7, 0x6d, 0xad, 0x39, 0x79,
+ 0xe5, 0xc5, 0x36, 0x0c, 0x33, 0x17, 0x16, 0x6a,
+ 0x1c, 0x89, 0x4c, 0x94, 0xa3, 0x71, 0x87, 0x6a,
+ 0x94, 0xdf, 0x76, 0x28, 0xfe, 0x4e, 0xaa, 0xf2,
+ 0xcc, 0xb2, 0x7d, 0x5a, 0xaa, 0xe0, 0xad, 0x7a,
+ 0xd0, 0xf9, 0xd4, 0xb6, 0xad, 0x3b, 0x54, 0x09,
+ 0x87, 0x46, 0xd4, 0x52, 0x4d, 0x38, 0x40, 0x7a,
+ 0x6d, 0xeb, 0x3a, 0xb7, 0x8f, 0xab, 0x78, 0xc9,
+ ),
+ },
+ );
+
+ for tv in test_vectors.iter() {
+ let mut c = ChaCha20::new(&tv.key, &tv.nonce);
+ let input: Vec<u8> = repeat(0).take(tv.keystream.len()).collect();
+ let mut output: Vec<u8> = repeat(0).take(input.len()).collect();
+ c.process(&input[..], &mut output[..]);
+ assert_eq!(output, tv.keystream);
+ }
+ }
+
+ #[test]
+ fn test_chacha20_256_tls_vectors_96_nonce() {
+ struct TestVector {
+ key: [u8; 32],
+ nonce: [u8; 12],
+ keystream: Vec<u8>,
+ }
+ // taken from http://tools.ietf.org/html/draft-agl-tls-chacha20poly1305-04
+ let test_vectors = vec!(
+ TestVector{
+ key: [
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ ],
+ nonce: [ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ],
+ keystream: vec!(
+ 0x76, 0xb8, 0xe0, 0xad, 0xa0, 0xf1, 0x3d, 0x90,
+ 0x40, 0x5d, 0x6a, 0xe5, 0x53, 0x86, 0xbd, 0x28,
+ 0xbd, 0xd2, 0x19, 0xb8, 0xa0, 0x8d, 0xed, 0x1a,
+ 0xa8, 0x36, 0xef, 0xcc, 0x8b, 0x77, 0x0d, 0xc7,
+ 0xda, 0x41, 0x59, 0x7c, 0x51, 0x57, 0x48, 0x8d,
+ 0x77, 0x24, 0xe0, 0x3f, 0xb8, 0xd8, 0x4a, 0x37,
+ 0x6a, 0x43, 0xb8, 0xf4, 0x15, 0x18, 0xa1, 0x1c,
+ 0xc3, 0x87, 0xb6, 0x69, 0xb2, 0xee, 0x65, 0x86,
+ ),
+ }, TestVector{
+ key: [
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
+ ],
+ nonce: [ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ],
+ keystream: vec!(
+ 0x45, 0x40, 0xf0, 0x5a, 0x9f, 0x1f, 0xb2, 0x96,
+ 0xd7, 0x73, 0x6e, 0x7b, 0x20, 0x8e, 0x3c, 0x96,
+ 0xeb, 0x4f, 0xe1, 0x83, 0x46, 0x88, 0xd2, 0x60,
+ 0x4f, 0x45, 0x09, 0x52, 0xed, 0x43, 0x2d, 0x41,
+ 0xbb, 0xe2, 0xa0, 0xb6, 0xea, 0x75, 0x66, 0xd2,
+ 0xa5, 0xd1, 0xe7, 0xe2, 0x0d, 0x42, 0xaf, 0x2c,
+ 0x53, 0xd7, 0x92, 0xb1, 0xc4, 0x3f, 0xea, 0x81,
+ 0x7e, 0x9a, 0xd2, 0x75, 0xae, 0x54, 0x69, 0x63,
+ ),
+ }, TestVector{
+ key: [
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ ],
+ nonce: [ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01 ],
+ keystream: vec!(
+ 0xde, 0x9c, 0xba, 0x7b, 0xf3, 0xd6, 0x9e, 0xf5,
+ 0xe7, 0x86, 0xdc, 0x63, 0x97, 0x3f, 0x65, 0x3a,
+ 0x0b, 0x49, 0xe0, 0x15, 0xad, 0xbf, 0xf7, 0x13,
+ 0x4f, 0xcb, 0x7d, 0xf1, 0x37, 0x82, 0x10, 0x31,
+ 0xe8, 0x5a, 0x05, 0x02, 0x78, 0xa7, 0x08, 0x45,
+ 0x27, 0x21, 0x4f, 0x73, 0xef, 0xc7, 0xfa, 0x5b,
+ 0x52, 0x77, 0x06, 0x2e, 0xb7, 0xa0, 0x43, 0x3e,
+ 0x44, 0x5f, 0x41, 0xe3,
+ ),
+ }, TestVector{
+ key: [
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ ],
+ nonce: [ 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ],
+ keystream: vec!(
+ 0xef, 0x3f, 0xdf, 0xd6, 0xc6, 0x15, 0x78, 0xfb,
+ 0xf5, 0xcf, 0x35, 0xbd, 0x3d, 0xd3, 0x3b, 0x80,
+ 0x09, 0x63, 0x16, 0x34, 0xd2, 0x1e, 0x42, 0xac,
+ 0x33, 0x96, 0x0b, 0xd1, 0x38, 0xe5, 0x0d, 0x32,
+ 0x11, 0x1e, 0x4c, 0xaf, 0x23, 0x7e, 0xe5, 0x3c,
+ 0xa8, 0xad, 0x64, 0x26, 0x19, 0x4a, 0x88, 0x54,
+ 0x5d, 0xdc, 0x49, 0x7a, 0x0b, 0x46, 0x6e, 0x7d,
+ 0x6b, 0xbd, 0xb0, 0x04, 0x1b, 0x2f, 0x58, 0x6b,
+ ),
+ }, TestVector{
+ key: [
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+ ],
+ nonce: [0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07 ],
+ keystream: vec!(
+ 0xf7, 0x98, 0xa1, 0x89, 0xf1, 0x95, 0xe6, 0x69,
+ 0x82, 0x10, 0x5f, 0xfb, 0x64, 0x0b, 0xb7, 0x75,
+ 0x7f, 0x57, 0x9d, 0xa3, 0x16, 0x02, 0xfc, 0x93,
+ 0xec, 0x01, 0xac, 0x56, 0xf8, 0x5a, 0xc3, 0xc1,
+ 0x34, 0xa4, 0x54, 0x7b, 0x73, 0x3b, 0x46, 0x41,
+ 0x30, 0x42, 0xc9, 0x44, 0x00, 0x49, 0x17, 0x69,
+ 0x05, 0xd3, 0xbe, 0x59, 0xea, 0x1c, 0x53, 0xf1,
+ 0x59, 0x16, 0x15, 0x5c, 0x2b, 0xe8, 0x24, 0x1a,
+ 0x38, 0x00, 0x8b, 0x9a, 0x26, 0xbc, 0x35, 0x94,
+ 0x1e, 0x24, 0x44, 0x17, 0x7c, 0x8a, 0xde, 0x66,
+ 0x89, 0xde, 0x95, 0x26, 0x49, 0x86, 0xd9, 0x58,
+ 0x89, 0xfb, 0x60, 0xe8, 0x46, 0x29, 0xc9, 0xbd,
+ 0x9a, 0x5a, 0xcb, 0x1c, 0xc1, 0x18, 0xbe, 0x56,
+ 0x3e, 0xb9, 0xb3, 0xa4, 0xa4, 0x72, 0xf8, 0x2e,
+ 0x09, 0xa7, 0xe7, 0x78, 0x49, 0x2b, 0x56, 0x2e,
+ 0xf7, 0x13, 0x0e, 0x88, 0xdf, 0xe0, 0x31, 0xc7,
+ 0x9d, 0xb9, 0xd4, 0xf7, 0xc7, 0xa8, 0x99, 0x15,
+ 0x1b, 0x9a, 0x47, 0x50, 0x32, 0xb6, 0x3f, 0xc3,
+ 0x85, 0x24, 0x5f, 0xe0, 0x54, 0xe3, 0xdd, 0x5a,
+ 0x97, 0xa5, 0xf5, 0x76, 0xfe, 0x06, 0x40, 0x25,
+ 0xd3, 0xce, 0x04, 0x2c, 0x56, 0x6a, 0xb2, 0xc5,
+ 0x07, 0xb1, 0x38, 0xdb, 0x85, 0x3e, 0x3d, 0x69,
+ 0x59, 0x66, 0x09, 0x96, 0x54, 0x6c, 0xc9, 0xc4,
+ 0xa6, 0xea, 0xfd, 0xc7, 0x77, 0xc0, 0x40, 0xd7,
+ 0x0e, 0xaf, 0x46, 0xf7, 0x6d, 0xad, 0x39, 0x79,
+ 0xe5, 0xc5, 0x36, 0x0c, 0x33, 0x17, 0x16, 0x6a,
+ 0x1c, 0x89, 0x4c, 0x94, 0xa3, 0x71, 0x87, 0x6a,
+ 0x94, 0xdf, 0x76, 0x28, 0xfe, 0x4e, 0xaa, 0xf2,
+ 0xcc, 0xb2, 0x7d, 0x5a, 0xaa, 0xe0, 0xad, 0x7a,
+ 0xd0, 0xf9, 0xd4, 0xb6, 0xad, 0x3b, 0x54, 0x09,
+ 0x87, 0x46, 0xd4, 0x52, 0x4d, 0x38, 0x40, 0x7a,
+ 0x6d, 0xeb, 0x3a, 0xb7, 0x8f, 0xab, 0x78, 0xc9,
+ ),
+ },
+ );
+
+ for tv in test_vectors.iter() {
+ let mut c = ChaCha20::new(&tv.key, &tv.nonce);
+ let input: Vec<u8> = repeat(0).take(tv.keystream.len()).collect();
+ let mut output: Vec<u8> = repeat(0).take(input.len()).collect();
+ c.process(&input[..], &mut output[..]);
+ assert_eq!(output, tv.keystream);
+ }
+ }
+
+ #[test]
+ fn get_single_block() {
+ // Test that `get_single_block` (which takes a 16-byte nonce) is equivalent to getting a block
+ // using a 12-byte nonce, with the block starting at the counter offset given by the remaining 4
+ // bytes.
+ let key = [
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+ ];
+ let nonce_16bytes = [
+ 0x00, 0x01, 0x02, 0x03, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b
+ ];
+ let counter_pos = &nonce_16bytes[..4];
+ let nonce_12bytes = &nonce_16bytes[4..];
+
+ // Initialize a ChaCha20 instance with its counter starting at 0.
+ let mut chacha20 = ChaCha20::new(&key, nonce_12bytes);
+ // Seek its counter to the block at counter_pos.
+ chacha20.seek_to_block(u32::from_le_bytes(counter_pos.try_into().unwrap()));
+ let mut block_bytes = [0; 32];
+ chacha20.process_in_place(&mut block_bytes);
+
+ assert_eq!(ChaCha20::get_single_block(&key, &nonce_16bytes), block_bytes);
+ }
+
+ #[test]
+ fn encrypt_single_block() {
+ // ChaCha20 encryption is an XOR with the keystream, so encrypting a ciphertext again
+ // with the same key/nonce must round-trip back to the plaintext.
+ let key = [
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+ ];
+ let nonce = [
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
+ 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ ];
+ let plaintext = [1; 32];
+
+ let mut ciphertext = [0; 32];
+ ChaCha20::encrypt_single_block(&key, &nonce, &mut ciphertext, &plaintext);
+
+ let mut roundtrip = [0; 32];
+ ChaCha20::encrypt_single_block(&key, &nonce, &mut roundtrip, &ciphertext);
+
+ assert_eq!(roundtrip, plaintext);
+ }
+
+ #[test]
+ fn encrypt_single_block_in_place() {
+ // In-place single-block encryption must change the buffer, and applying it a second
+ // time with the same key/nonce must restore the original contents (XOR round-trip).
+ let key = [
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+ ];
+ let nonce = [
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
+ 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ ];
+ let original = [1; 32];
+ let mut buffer = original;
+
+ ChaCha20::encrypt_single_block_in_place(&key, &nonce, &mut buffer);
+ assert_ne!(buffer, original);
+
+ ChaCha20::encrypt_single_block_in_place(&key, &nonce, &mut buffer);
+ assert_eq!(buffer, original);
+ }
+}
--- /dev/null
+// ring has a garbage API so its use is avoided, but rust-crypto doesn't have RFC-variant poly1305
+// Instead, we steal rust-crypto's implementation and tweak it to match the RFC.
+//
+// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
+// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
+// You may not use this file except in accordance with one or both of these
+// licenses.
+//
+// This is a port of Andrew Moon's poly1305-donna
+// https://github.com/floodyberry/poly1305-donna
+
+#[cfg(not(fuzzing))]
+mod real_chachapoly {
+ use super::super::chacha20::ChaCha20;
+ use super::super::poly1305::Poly1305;
+ use super::super::fixed_time_eq;
+
+ /// ChaCha20-Poly1305 AEAD built from the in-crate ChaCha20 stream cipher and the Poly1305
+ /// one-time authenticator, following the RFC 7539 construction (AAD and ciphertext are
+ /// zero-padded to 16-byte boundaries before MACing, then the LE lengths are appended).
+ #[derive(Clone, Copy)]
+ pub struct ChaCha20Poly1305RFC {
+ cipher: ChaCha20,
+ mac: Poly1305,
+ finished: bool,
+ data_len: usize,
+ aad_len: u64,
+ }
+
+ impl ChaCha20Poly1305RFC {
+ // Zero-pad the MAC input up to a 16-byte boundary, per the RFC's AEAD construction.
+ #[inline]
+ fn pad_mac_16(mac: &mut Poly1305, len: usize) {
+ if len % 16 != 0 {
+ mac.input(&[0; 16][0..16 - (len % 16)]);
+ }
+ }
+ /// Creates a new AEAD instance with the given `key` and 12-byte `nonce`, absorbing `aad`
+ /// as associated data. The first 4 nonce bytes must be zero since the underlying ChaCha20
+ /// implementation takes only the trailing 8 bytes.
+ pub fn new(key: &[u8], nonce: &[u8], aad: &[u8]) -> ChaCha20Poly1305RFC {
+ assert!(key.len() == 16 || key.len() == 32);
+ assert!(nonce.len() == 12);
+
+ // Ehh, I'm too lazy to *also* tweak ChaCha20 to make it RFC-compliant
+ assert!(nonce[0] == 0 && nonce[1] == 0 && nonce[2] == 0 && nonce[3] == 0);
+
+ let mut cipher = ChaCha20::new(key, &nonce[4..]);
+ // The Poly1305 key is the first 32 bytes of the first keystream block; the rest of
+ // that block is discarded.
+ let mut mac_key = [0u8; 64];
+ let zero_key = [0u8; 64];
+ cipher.process(&zero_key, &mut mac_key);
+
+ let mut mac = Poly1305::new(&mac_key[..32]);
+ mac.input(aad);
+ ChaCha20Poly1305RFC::pad_mac_16(&mut mac, aad.len());
+
+ ChaCha20Poly1305RFC {
+ cipher,
+ mac,
+ finished: false,
+ data_len: 0,
+ aad_len: aad.len() as u64,
+ }
+ }
+
+ /// Encrypts `input` into `output` (which must be the same length) in one shot, writing
+ /// the 16-byte authentication tag into `out_tag`.
+ pub fn encrypt(&mut self, input: &[u8], output: &mut [u8], out_tag: &mut [u8]) {
+ assert!(input.len() == output.len());
+ assert!(self.finished == false);
+ self.cipher.process(input, output);
+ self.data_len += input.len();
+ self.mac.input(output);
+ ChaCha20Poly1305RFC::pad_mac_16(&mut self.mac, self.data_len);
+ self.finished = true;
+ self.mac.input(&self.aad_len.to_le_bytes());
+ self.mac.input(&(self.data_len as u64).to_le_bytes());
+ self.mac.raw_result(out_tag);
+ }
+
+ /// Encrypts `input_output` in place in one shot, writing the 16-byte tag into `out_tag`.
+ pub fn encrypt_full_message_in_place(&mut self, input_output: &mut [u8], out_tag: &mut [u8]) {
+ self.encrypt_in_place(input_output);
+ self.finish_and_get_tag(out_tag);
+ }
+
+ // Encrypt `input_output` in-place. To finish and calculate the tag, use `finish_and_get_tag`
+ // below.
+ pub(in super::super) fn encrypt_in_place(&mut self, input_output: &mut [u8]) {
+ debug_assert!(self.finished == false);
+ self.cipher.process_in_place(input_output);
+ self.data_len += input_output.len();
+ self.mac.input(input_output);
+ }
+
+ // If we were previously encrypting with `encrypt_in_place`, this method can be used to finish
+ // encrypting and calculate the tag.
+ pub(in super::super) fn finish_and_get_tag(&mut self, out_tag: &mut [u8]) {
+ debug_assert!(self.finished == false);
+ ChaCha20Poly1305RFC::pad_mac_16(&mut self.mac, self.data_len);
+ self.finished = true;
+ self.mac.input(&self.aad_len.to_le_bytes());
+ self.mac.input(&(self.data_len as u64).to_le_bytes());
+ self.mac.raw_result(out_tag);
+ }
+
+ /// Decrypt the `input`, checking the given `tag` prior to writing the decrypted contents
+ /// into `output`. Note that, because `output` is not touched until the `tag` is checked,
+ /// this decryption is *variable time*.
+ pub fn variable_time_decrypt(&mut self, input: &[u8], output: &mut [u8], tag: &[u8]) -> Result<(), ()> {
+ assert!(input.len() == output.len());
+ assert!(self.finished == false);
+
+ self.finished = true;
+
+ self.mac.input(input);
+
+ self.data_len += input.len();
+ ChaCha20Poly1305RFC::pad_mac_16(&mut self.mac, self.data_len);
+ self.mac.input(&self.aad_len.to_le_bytes());
+ self.mac.input(&(self.data_len as u64).to_le_bytes());
+
+ let mut calc_tag = [0u8; 16];
+ self.mac.raw_result(&mut calc_tag);
+ if fixed_time_eq(&calc_tag, tag) {
+ self.cipher.process(input, output);
+ Ok(())
+ } else {
+ Err(())
+ }
+ }
+
+ /// Decrypts `input_output` in place in one shot, returning `Err(())` if the `tag` does
+ /// not match. Note the buffer is decrypted before the tag is checked.
+ pub fn check_decrypt_in_place(&mut self, input_output: &mut [u8], tag: &[u8]) -> Result<(), ()> {
+ self.decrypt_in_place(input_output);
+ if self.finish_and_check_tag(tag) { Ok(()) } else { Err(()) }
+ }
+
+ /// Decrypt in place, without checking the tag. Use `finish_and_check_tag` to check it
+ /// later when decryption finishes.
+ ///
+ /// Should never be `pub` because the public API should always enforce tag checking.
+ pub(in super::super) fn decrypt_in_place(&mut self, input_output: &mut [u8]) {
+ debug_assert!(self.finished == false);
+ self.mac.input(input_output);
+ self.data_len += input_output.len();
+ self.cipher.process_in_place(input_output);
+ }
+
+ /// If we were previously decrypting with `decrypt_in_place`, this method must be used
+ /// to check the tag. Returns whether or not the tag is valid.
+ pub(in super::super) fn finish_and_check_tag(&mut self, tag: &[u8]) -> bool {
+ debug_assert!(self.finished == false);
+ self.finished = true;
+ ChaCha20Poly1305RFC::pad_mac_16(&mut self.mac, self.data_len);
+ self.mac.input(&self.aad_len.to_le_bytes());
+ self.mac.input(&(self.data_len as u64).to_le_bytes());
+
+ let mut calc_tag = [0u8; 16];
+ self.mac.raw_result(&mut calc_tag);
+ // Constant-time comparison; the result is the answer directly (the redundant
+ // `if ... { true } else { false }` was removed).
+ fixed_time_eq(&calc_tag, tag)
+ }
+ }
+}
+#[cfg(not(fuzzing))]
+pub use self::real_chachapoly::ChaCha20Poly1305RFC;
+
+#[cfg(fuzzing)]
+mod fuzzy_chachapoly {
+ /// Fuzzing stand-in for the real AEAD: bytes pass through unmodified and the "tag" is
+ /// simply the first 16 bytes of the key, letting fuzzers build valid ciphertext trivially.
+ #[derive(Clone, Copy)]
+ pub struct ChaCha20Poly1305RFC {
+ tag: [u8; 16],
+ finished: bool,
+ }
+ impl ChaCha20Poly1305RFC {
+ pub fn new(key: &[u8], nonce: &[u8], _aad: &[u8]) -> ChaCha20Poly1305RFC {
+ assert!(key.len() == 16 || key.len() == 32);
+ assert!(nonce.len() == 12);
+
+ // Ehh, I'm too lazy to *also* tweak ChaCha20 to make it RFC-compliant
+ assert!(nonce[0] == 0 && nonce[1] == 0 && nonce[2] == 0 && nonce[3] == 0);
+
+ // The mock "tag" is just the first half of the key.
+ let mut mock_tag = [0; 16];
+ mock_tag.copy_from_slice(&key[0..16]);
+
+ ChaCha20Poly1305RFC { tag: mock_tag, finished: false }
+ }
+
+ pub fn encrypt(&mut self, input: &[u8], output: &mut [u8], out_tag: &mut [u8]) {
+ assert!(input.len() == output.len());
+ assert!(self.finished == false);
+
+ out_tag.copy_from_slice(&self.tag);
+ output.copy_from_slice(input);
+ self.finished = true;
+ }
+
+ pub fn encrypt_full_message_in_place(&mut self, input_output: &mut [u8], out_tag: &mut [u8]) {
+ self.encrypt_in_place(input_output);
+ self.finish_and_get_tag(out_tag);
+ }
+
+ pub(in super::super) fn encrypt_in_place(&mut self, _input_output: &mut [u8]) {
+ assert!(self.finished == false);
+ }
+
+ pub(in super::super) fn finish_and_get_tag(&mut self, out_tag: &mut [u8]) {
+ assert!(self.finished == false);
+ self.finished = true;
+ out_tag.copy_from_slice(&self.tag);
+ }
+
+ pub fn variable_time_decrypt(&mut self, input: &[u8], output: &mut [u8], tag: &[u8]) -> Result<(), ()> {
+ assert!(input.len() == output.len());
+ assert!(self.finished == false);
+
+ if tag[..] == self.tag[..] {
+ output.copy_from_slice(input);
+ self.finished = true;
+ Ok(())
+ } else {
+ Err(())
+ }
+ }
+
+ pub fn check_decrypt_in_place(&mut self, input_output: &mut [u8], tag: &[u8]) -> Result<(), ()> {
+ self.decrypt_in_place(input_output);
+ match self.finish_and_check_tag(tag) {
+ true => Ok(()),
+ false => Err(()),
+ }
+ }
+
+ pub(in super::super) fn decrypt_in_place(&mut self, _input: &mut [u8]) {
+ assert!(self.finished == false);
+ }
+
+ pub(in super::super) fn finish_and_check_tag(&mut self, tag: &[u8]) -> bool {
+ if tag[..] == self.tag[..] {
+ self.finished = true;
+ true
+ } else {
+ false
+ }
+ }
+ }
+}
+#[cfg(fuzzing)]
+pub use self::fuzzy_chachapoly::ChaCha20Poly1305RFC;
--- /dev/null
+use bitcoin::hashes::cmp::fixed_time_eq;
+
+pub(crate) mod chacha20;
+#[cfg(not(fuzzing))]
+pub(crate) mod poly1305;
+pub(crate) mod chacha20poly1305rfc;
+pub(crate) mod streams;
+pub(crate) mod utils;
--- /dev/null
+// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
+// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
+// You may not use this file except in accordance with one or both of these
+// licenses.
+
+// This is a port of Andrew Moon's poly1305-donna
+// https://github.com/floodyberry/poly1305-donna
+
+use core::cmp::min;
+use core::convert::TryInto;
+
+#[derive(Clone, Copy)]
+pub struct Poly1305 {
+ r : [u32; 5], // the clamped key multiplier `r`, split into 26-bit limbs
+ h : [u32; 5], // the running accumulator, in 26-bit limbs
+ pad : [u32; 4], // the `s` half of the key, added to the final reduced hash
+ leftover : usize, // number of valid bytes currently buffered in `buffer`
+ buffer : [u8; 16], // partial input block awaiting more data (or finalization)
+ finalized : bool, // set once the trailing partial block has been absorbed
+}
+
+impl Poly1305 {
+ /// Creates a Poly1305 state from a 32-byte one-time key: the first 16 bytes become the
+ /// clamped multiplier `r` (in 26-bit limbs), the last 16 the final addend stored in `pad`.
+ pub fn new(key: &[u8]) -> Poly1305 {
+ assert!(key.len() == 32);
+ let mut poly = Poly1305{ r: [0u32; 5], h: [0u32; 5], pad: [0u32; 4], leftover: 0, buffer: [0u8; 16], finalized: false };
+
+ // r &= 0xffffffc0ffffffc0ffffffc0fffffff
+ // (the clamping mask is folded into the per-limb masks below)
+ poly.r[0] = (u32::from_le_bytes(key[ 0.. 4].try_into().expect("len is 4")) ) & 0x3ffffff;
+ poly.r[1] = (u32::from_le_bytes(key[ 3.. 7].try_into().expect("len is 4")) >> 2) & 0x3ffff03;
+ poly.r[2] = (u32::from_le_bytes(key[ 6..10].try_into().expect("len is 4")) >> 4) & 0x3ffc0ff;
+ poly.r[3] = (u32::from_le_bytes(key[ 9..13].try_into().expect("len is 4")) >> 6) & 0x3f03fff;
+ poly.r[4] = (u32::from_le_bytes(key[12..16].try_into().expect("len is 4")) >> 8) & 0x00fffff;
+
+ poly.pad[0] = u32::from_le_bytes(key[16..20].try_into().expect("len is 4"));
+ poly.pad[1] = u32::from_le_bytes(key[20..24].try_into().expect("len is 4"));
+ poly.pad[2] = u32::from_le_bytes(key[24..28].try_into().expect("len is 4"));
+ poly.pad[3] = u32::from_le_bytes(key[28..32].try_into().expect("len is 4"));
+
+ poly
+ }
+
+ // Absorbs one 16-byte block: h = (h + m) * r, partially reduced mod 2^130 - 5.
+ // All arithmetic is on 26-bit limbs so the u64 products cannot overflow.
+ fn block(&mut self, m: &[u8]) {
+ // The 2^128 marker bit is set for full blocks only; `finish` pads the trailing
+ // partial block itself and suppresses the bit via `finalized`.
+ let hibit : u32 = if self.finalized { 0 } else { 1 << 24 };
+
+ let r0 = self.r[0];
+ let r1 = self.r[1];
+ let r2 = self.r[2];
+ let r3 = self.r[3];
+ let r4 = self.r[4];
+
+ // s[i] = r[i] * 5 folds the mod-(2^130 - 5) reduction into the multiplication.
+ let s1 = r1 * 5;
+ let s2 = r2 * 5;
+ let s3 = r3 * 5;
+ let s4 = r4 * 5;
+
+ let mut h0 = self.h[0];
+ let mut h1 = self.h[1];
+ let mut h2 = self.h[2];
+ let mut h3 = self.h[3];
+ let mut h4 = self.h[4];
+
+ // h += m
+ h0 += (u32::from_le_bytes(m[ 0.. 4].try_into().expect("len is 4")) ) & 0x3ffffff;
+ h1 += (u32::from_le_bytes(m[ 3.. 7].try_into().expect("len is 4")) >> 2) & 0x3ffffff;
+ h2 += (u32::from_le_bytes(m[ 6..10].try_into().expect("len is 4")) >> 4) & 0x3ffffff;
+ h3 += (u32::from_le_bytes(m[ 9..13].try_into().expect("len is 4")) >> 6) & 0x3ffffff;
+ h4 += (u32::from_le_bytes(m[12..16].try_into().expect("len is 4")) >> 8) | hibit;
+
+ // h *= r
+ let d0 = (h0 as u64 * r0 as u64) + (h1 as u64 * s4 as u64) + (h2 as u64 * s3 as u64) + (h3 as u64 * s2 as u64) + (h4 as u64 * s1 as u64);
+ let mut d1 = (h0 as u64 * r1 as u64) + (h1 as u64 * r0 as u64) + (h2 as u64 * s4 as u64) + (h3 as u64 * s3 as u64) + (h4 as u64 * s2 as u64);
+ let mut d2 = (h0 as u64 * r2 as u64) + (h1 as u64 * r1 as u64) + (h2 as u64 * r0 as u64) + (h3 as u64 * s4 as u64) + (h4 as u64 * s3 as u64);
+ let mut d3 = (h0 as u64 * r3 as u64) + (h1 as u64 * r2 as u64) + (h2 as u64 * r1 as u64) + (h3 as u64 * r0 as u64) + (h4 as u64 * s4 as u64);
+ let mut d4 = (h0 as u64 * r4 as u64) + (h1 as u64 * r3 as u64) + (h2 as u64 * r2 as u64) + (h3 as u64 * r1 as u64) + (h4 as u64 * r0 as u64);
+
+ // (partial) h %= p
+ // Propagate carries limb-to-limb; the top carry wraps around multiplied by 5.
+ let mut c : u32;
+ c = (d0 >> 26) as u32; h0 = d0 as u32 & 0x3ffffff;
+ d1 += c as u64; c = (d1 >> 26) as u32; h1 = d1 as u32 & 0x3ffffff;
+ d2 += c as u64; c = (d2 >> 26) as u32; h2 = d2 as u32 & 0x3ffffff;
+ d3 += c as u64; c = (d3 >> 26) as u32; h3 = d3 as u32 & 0x3ffffff;
+ d4 += c as u64; c = (d4 >> 26) as u32; h4 = d4 as u32 & 0x3ffffff;
+ h0 += c * 5; c = h0 >> 26; h0 = h0 & 0x3ffffff;
+ h1 += c;
+
+ self.h[0] = h0;
+ self.h[1] = h1;
+ self.h[2] = h2;
+ self.h[3] = h3;
+ self.h[4] = h4;
+ }
+
+ /// Absorbs any buffered partial block, fully reduces `h` mod 2^130 - 5, then adds `pad`
+ /// so that `h` holds the final 128-bit MAC.
+ pub fn finish(&mut self) {
+ if self.leftover > 0 {
+ // Pad the trailing partial block: append a 1 byte, zero-fill the rest, and
+ // process it without the 2^128 marker bit (`finalized` suppresses it).
+ self.buffer[self.leftover] = 1;
+ for i in self.leftover+1..16 {
+ self.buffer[i] = 0;
+ }
+ self.finalized = true;
+ let tmp = self.buffer;
+ self.block(&tmp);
+ }
+
+ // fully carry h
+ let mut h0 = self.h[0];
+ let mut h1 = self.h[1];
+ let mut h2 = self.h[2];
+ let mut h3 = self.h[3];
+ let mut h4 = self.h[4];
+
+ let mut c : u32;
+ c = h1 >> 26; h1 = h1 & 0x3ffffff;
+ h2 += c; c = h2 >> 26; h2 = h2 & 0x3ffffff;
+ h3 += c; c = h3 >> 26; h3 = h3 & 0x3ffffff;
+ h4 += c; c = h4 >> 26; h4 = h4 & 0x3ffffff;
+ h0 += c * 5; c = h0 >> 26; h0 = h0 & 0x3ffffff;
+ h1 += c;
+
+ // compute h + -p
+ let mut g0 = h0.wrapping_add(5); c = g0 >> 26; g0 &= 0x3ffffff;
+ let mut g1 = h1.wrapping_add(c); c = g1 >> 26; g1 &= 0x3ffffff;
+ let mut g2 = h2.wrapping_add(c); c = g2 >> 26; g2 &= 0x3ffffff;
+ let mut g3 = h3.wrapping_add(c); c = g3 >> 26; g3 &= 0x3ffffff;
+ let mut g4 = h4.wrapping_add(c).wrapping_sub(1 << 26);
+
+ // select h if h < p, or h + -p if h >= p
+ // (branch-free: `mask` is all-ones or all-zeros based on the borrow of g4)
+ let mut mask = (g4 >> (32 - 1)).wrapping_sub(1);
+ g0 &= mask;
+ g1 &= mask;
+ g2 &= mask;
+ g3 &= mask;
+ g4 &= mask;
+ mask = !mask;
+ h0 = (h0 & mask) | g0;
+ h1 = (h1 & mask) | g1;
+ h2 = (h2 & mask) | g2;
+ h3 = (h3 & mask) | g3;
+ h4 = (h4 & mask) | g4;
+
+ // h = h % (2^128)
+ // Repack the five 26-bit limbs into four 32-bit words.
+ h0 = ((h0 ) | (h1 << 26)) & 0xffffffff;
+ h1 = ((h1 >> 6) | (h2 << 20)) & 0xffffffff;
+ h2 = ((h2 >> 12) | (h3 << 14)) & 0xffffffff;
+ h3 = ((h3 >> 18) | (h4 << 8)) & 0xffffffff;
+
+ // h = mac = (h + pad) % (2^128)
+ let mut f : u64;
+ f = h0 as u64 + self.pad[0] as u64 ; h0 = f as u32;
+ f = h1 as u64 + self.pad[1] as u64 + (f >> 32); h1 = f as u32;
+ f = h2 as u64 + self.pad[2] as u64 + (f >> 32); h2 = f as u32;
+ f = h3 as u64 + self.pad[3] as u64 + (f >> 32); h3 = f as u32;
+
+ self.h[0] = h0;
+ self.h[1] = h1;
+ self.h[2] = h2;
+ self.h[3] = h3;
+ }
+
+ /// Absorbs `data` into the MAC, buffering any trailing partial block until more input
+ /// arrives or the state is finalized. Panics if called after finalization.
+ pub fn input(&mut self, data: &[u8]) {
+ assert!(!self.finalized);
+ let mut m = data;
+
+ // First top up any previously-buffered partial block.
+ if self.leftover > 0 {
+ let want = min(16 - self.leftover, m.len());
+ for i in 0..want {
+ self.buffer[self.leftover+i] = m[i];
+ }
+ m = &m[want..];
+ self.leftover += want;
+
+ if self.leftover < 16 {
+ return;
+ }
+
+ // The buffer is now full; absorb it (copied out to appease the borrow checker).
+ let tmp = self.buffer;
+ self.block(&tmp);
+
+ self.leftover = 0;
+ }
+
+ // Then process all remaining full blocks directly from the input.
+ while m.len() >= 16 {
+ self.block(&m[0..16]);
+ m = &m[16..];
+ }
+
+ // Finally buffer whatever partial block remains.
+ for i in 0..m.len() {
+ self.buffer[i] = m[i];
+ }
+ self.leftover = m.len();
+ }
+
+ /// Writes the 16-byte MAC into the start of `output`, finalizing first if needed.
+ // NOTE(review): if the total input length was a multiple of 16, `finish` leaves
+ // `finalized` unset, so a second call here would add `pad` a second time — callers
+ // appear to call this exactly once per state. TODO confirm against poly1305-donna.
+ pub fn raw_result(&mut self, output: &mut [u8]) {
+ assert!(output.len() >= 16);
+ if !self.finalized{
+ self.finish();
+ }
+ output[0..4].copy_from_slice(&self.h[0].to_le_bytes());
+ output[4..8].copy_from_slice(&self.h[1].to_le_bytes());
+ output[8..12].copy_from_slice(&self.h[2].to_le_bytes());
+ output[12..16].copy_from_slice(&self.h[3].to_le_bytes());
+ }
+}
+
+#[cfg(test)]
+mod test {
+ use core::iter::repeat;
+ use alloc::vec::Vec;
+
+ use super::Poly1305;
+
+ // One-shot convenience wrapper used by the vector tests below.
+ fn poly1305(key: &[u8], msg: &[u8], mac: &mut [u8]) {
+ let mut poly = Poly1305::new(key);
+ poly.input(msg);
+ poly.raw_result(mac);
+ }
+
+ #[test]
+ fn test_nacl_vector() {
+ let key = [
+ 0xee,0xa6,0xa7,0x25,0x1c,0x1e,0x72,0x91,
+ 0x6d,0x11,0xc2,0xcb,0x21,0x4d,0x3c,0x25,
+ 0x25,0x39,0x12,0x1d,0x8e,0x23,0x4e,0x65,
+ 0x2d,0x65,0x1f,0xa4,0xc8,0xcf,0xf8,0x80,
+ ];
+
+ let msg = [
+ 0x8e,0x99,0x3b,0x9f,0x48,0x68,0x12,0x73,
+ 0xc2,0x96,0x50,0xba,0x32,0xfc,0x76,0xce,
+ 0x48,0x33,0x2e,0xa7,0x16,0x4d,0x96,0xa4,
+ 0x47,0x6f,0xb8,0xc5,0x31,0xa1,0x18,0x6a,
+ 0xc0,0xdf,0xc1,0x7c,0x98,0xdc,0xe8,0x7b,
+ 0x4d,0xa7,0xf0,0x11,0xec,0x48,0xc9,0x72,
+ 0x71,0xd2,0xc2,0x0f,0x9b,0x92,0x8f,0xe2,
+ 0x27,0x0d,0x6f,0xb8,0x63,0xd5,0x17,0x38,
+ 0xb4,0x8e,0xee,0xe3,0x14,0xa7,0xcc,0x8a,
+ 0xb9,0x32,0x16,0x45,0x48,0xe5,0x26,0xae,
+ 0x90,0x22,0x43,0x68,0x51,0x7a,0xcf,0xea,
+ 0xbd,0x6b,0xb3,0x73,0x2b,0xc0,0xe9,0xda,
+ 0x99,0x83,0x2b,0x61,0xca,0x01,0xb6,0xde,
+ 0x56,0x24,0x4a,0x9e,0x88,0xd5,0xf9,0xb3,
+ 0x79,0x73,0xf6,0x22,0xa4,0x3d,0x14,0xa6,
+ 0x59,0x9b,0x1f,0x65,0x4c,0xb4,0x5a,0x74,
+ 0xe3,0x55,0xa5,
+ ];
+
+ let expected = [
+ 0xf3,0xff,0xc7,0x70,0x3f,0x94,0x00,0xe5,
+ 0x2a,0x7d,0xfb,0x4b,0x3d,0x33,0x05,0xd9,
+ ];
+
+ let mut mac = [0u8; 16];
+ poly1305(&key, &msg, &mut mac);
+ assert_eq!(&mac[..], &expected[..]);
+
+ // Feeding the same message in arbitrary-sized pieces must produce the same MAC,
+ // exercising the partial-block buffering in `input`.
+ let mut poly = Poly1305::new(&key);
+ poly.input(&msg[0..32]);
+ poly.input(&msg[32..96]);
+ poly.input(&msg[96..112]);
+ poly.input(&msg[112..120]);
+ poly.input(&msg[120..124]);
+ poly.input(&msg[124..126]);
+ poly.input(&msg[126..127]);
+ poly.input(&msg[127..128]);
+ poly.input(&msg[128..129]);
+ poly.input(&msg[129..130]);
+ poly.input(&msg[130..131]);
+ poly.raw_result(&mut mac);
+ assert_eq!(&mac[..], &expected[..]);
+ }
+
+ #[test]
+ fn donna_self_test() {
+ // poly1305-donna's self-test: first a wrap-around case, then a MAC over the MACs of
+ // 256 messages of increasing length.
+ let wrap_key = [
+ 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ ];
+
+ let wrap_msg = [
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ ];
+
+ let wrap_mac = [
+ 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ ];
+
+ let mut mac = [0u8; 16];
+ poly1305(&wrap_key, &wrap_msg, &mut mac);
+ assert_eq!(&mac[..], &wrap_mac[..]);
+
+ let total_key = [
+ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0xff,
+ 0xfe, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+ ];
+
+ let total_mac = [
+ 0x64, 0xaf, 0xe2, 0xe8, 0xd6, 0xad, 0x7b, 0xbd,
+ 0xd2, 0x87, 0xf9, 0x7c, 0x44, 0x62, 0x3d, 0x39,
+ ];
+
+ let mut tpoly = Poly1305::new(&total_key);
+ for i in 0..256 {
+ let key: Vec<u8> = repeat(i as u8).take(32).collect();
+ let msg: Vec<u8> = repeat(i as u8).take(256).collect();
+ let mut mac = [0u8; 16];
+ poly1305(&key[..], &msg[0..i], &mut mac);
+ tpoly.input(&mac);
+ }
+ tpoly.raw_result(&mut mac);
+ assert_eq!(&mac[..], &total_mac[..]);
+ }
+
+ #[test]
+ fn test_tls_vectors() {
+ // from http://tools.ietf.org/html/draft-agl-tls-chacha20poly1305-04
+ let key = b"this is 32-byte key for Poly1305";
+ let msg = [0u8; 32];
+ let expected = [
+ 0x49, 0xec, 0x78, 0x09, 0x0e, 0x48, 0x1e, 0xc6,
+ 0xc2, 0x6b, 0x33, 0xb9, 0x1c, 0xcc, 0x03, 0x07,
+ ];
+ let mut mac = [0u8; 16];
+ poly1305(key, &msg, &mut mac);
+ assert_eq!(&mac[..], &expected[..]);
+
+ let msg = b"Hello world!";
+ let expected= [
+ 0xa6, 0xf7, 0x45, 0x00, 0x8f, 0x81, 0xc9, 0x16,
+ 0xa2, 0x0d, 0xcc, 0x74, 0xee, 0xf2, 0xb2, 0xf0,
+ ];
+ poly1305(key, msg, &mut mac);
+ assert_eq!(&mac[..], &expected[..]);
+ }
+}
--- /dev/null
+use crate::crypto::chacha20::ChaCha20;
+use crate::crypto::chacha20poly1305rfc::ChaCha20Poly1305RFC;
+
+use crate::ln::msgs::DecodeError;
+use crate::util::ser::{FixedLengthReader, LengthRead, LengthReadableArgs, Readable, Writeable, Writer};
+use crate::io::{self, Read, Write};
+
+/// Wraps a reader, applying the ChaCha20 keystream to every byte read through it.
+pub(crate) struct ChaChaReader<'a, R: io::Read> {
+ pub chacha: &'a mut ChaCha20,
+ pub read: R,
+}
+impl<'a, R: io::Read> io::Read for ChaChaReader<'a, R> {
+ fn read(&mut self, dest: &mut [u8]) -> Result<usize, io::Error> {
+ let bytes_read = self.read.read(dest)?;
+ match bytes_read {
+ 0 => Ok(0),
+ n => {
+ self.chacha.process_in_place(&mut dest[..n]);
+ Ok(n)
+ },
+ }
+ }
+}
+
+/// Adapter allowing the serialization macros to encrypt an object with ChaCha20-Poly1305 as
+/// it is serialized, avoiding an intermediate Vec allocation.
+pub(crate) struct ChaChaPolyWriteAdapter<'a, W: Writeable> {
+ pub rho: [u8; 32],
+ pub writeable: &'a W,
+}
+
+impl<'a, W: Writeable> ChaChaPolyWriteAdapter<'a, W> {
+ #[allow(unused)] // This will be used for onion messages soon
+ pub fn new(rho: [u8; 32], writeable: &'a W) -> ChaChaPolyWriteAdapter<'a, W> {
+ ChaChaPolyWriteAdapter { rho, writeable }
+ }
+}
+
+impl<'a, T: Writeable> Writeable for ChaChaPolyWriteAdapter<'a, T> {
+ // Serialize and encrypt Self::writeable in a single pass, then append the AEAD tag.
+ fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
+ let mut cipher = ChaCha20Poly1305RFC::new(&self.rho, &[0; 12], &[]);
+ {
+ let mut enc_writer = ChaChaPolyWriter { chacha: &mut cipher, write: w };
+ self.writeable.write(&mut enc_writer)?;
+ }
+ // Trailing 16-byte Poly1305 tag lets readers authenticate the stream.
+ let mut tag = [0u8; 16];
+ cipher.finish_and_get_tag(&mut tag);
+ tag.write(w)?;
+
+ Ok(())
+ }
+}
+
+/// Adapter allowing the serialization macros to decrypt an object with ChaCha20-Poly1305 as
+/// it is deserialized, avoiding an intermediate Vec allocation.
+pub(crate) struct ChaChaPolyReadAdapter<R: Readable> {
+ pub readable: R,
+}
+
+impl<T: Readable> LengthReadableArgs<[u8; 32]> for ChaChaPolyReadAdapter<T> {
+ // Simultaneously read and decrypt an object from a LengthRead, storing it in Self::readable.
+ // LengthRead must be used instead of std::io::Read because we need the total length to separate
+ // out the tag at the end.
+ fn read<R: LengthRead>(mut r: &mut R, secret: [u8; 32]) -> Result<Self, DecodeError> {
+ if r.total_bytes() < 16 { return Err(DecodeError::InvalidValue) }
+
+ let mut cipher = ChaCha20Poly1305RFC::new(&secret, &[0; 12], &[]);
+ // Everything except the trailing 16 bytes is ciphertext; the tail is the tag.
+ let ciphertext_len = r.total_bytes() - 16;
+ let fixed_len_reader = FixedLengthReader::new(&mut r, ciphertext_len);
+ let mut dec_reader = ChaChaPolyReader { chacha: &mut cipher, read: fixed_len_reader };
+ let readable: T = Readable::read(&mut dec_reader)?;
+ dec_reader.read.eat_remaining()?;
+
+ let mut tag = [0u8; 16];
+ r.read_exact(&mut tag)?;
+ if cipher.finish_and_check_tag(&tag) {
+ Ok(Self { readable })
+ } else {
+ Err(DecodeError::InvalidValue)
+ }
+ }
+}
+
+
+/// Enables simultaneously reading and decrypting a ChaCha20Poly1305RFC stream from a std::io::Read.
+struct ChaChaPolyReader<'a, R: Read> {
+ pub chacha: &'a mut ChaCha20Poly1305RFC,
+ pub read: R,
+}
+
+impl<'a, R: Read> Read for ChaChaPolyReader<'a, R> {
+ // Decrypt bytes from Self::read into `dest`.
+ // `ChaCha20Poly1305RFC::finish_and_check_tag` must be called to check the tag after all reads
+ // complete.
+ fn read(&mut self, dest: &mut [u8]) -> Result<usize, io::Error> {
+ let bytes_read = self.read.read(dest)?;
+ match bytes_read {
+ 0 => Ok(0),
+ n => {
+ self.chacha.decrypt_in_place(&mut dest[..n]);
+ Ok(n)
+ },
+ }
+ }
+}
+
+/// Enables simultaneously writing and encrypting a byte stream into a Writer.
+struct ChaChaPolyWriter<'a, W: Writer> {
+ pub chacha: &'a mut ChaCha20Poly1305RFC,
+ pub write: &'a mut W,
+}
+
+impl<'a, W: Writer> Writer for ChaChaPolyWriter<'a, W> {
+ // Encrypt then write bytes from `src` into Self::write.
+ // `ChaCha20Poly1305RFC::finish_and_get_tag` can be called to retrieve the tag after all writes
+ // complete.
+ fn write_all(&mut self, src: &[u8]) -> Result<(), io::Error> {
+ // Work through `src` in bounded stack-buffer chunks so we never heap-allocate.
+ let mut write_buffer = [0; 8192];
+ for chunk in src.chunks(write_buffer.len()) {
+ let bytes_written = (&mut write_buffer[..]).write(chunk).expect("In-memory writes can't fail");
+ debug_assert_eq!(bytes_written, chunk.len());
+ self.chacha.encrypt_in_place(&mut write_buffer[..bytes_written]);
+ self.write.write_all(&write_buffer[..bytes_written])?;
+ }
+ Ok(())
+ }
+}
+
+
+#[cfg(test)]
+mod tests {
+ use crate::ln::msgs::DecodeError;
+ use super::{ChaChaPolyReadAdapter, ChaChaPolyWriteAdapter};
+ use crate::util::ser::{self, FixedLengthReader, LengthReadableArgs, Writeable};
+
+ // Used for testing various lengths of serialization.
+ #[derive(Debug, PartialEq, Eq)]
+ struct TestWriteable {
+ field1: Vec<u8>,
+ field2: Vec<u8>,
+ field3: Vec<u8>,
+ }
+ impl_writeable_tlv_based!(TestWriteable, {
+ (1, field1, required_vec),
+ (2, field2, required_vec),
+ (3, field3, required_vec),
+ });
+
+ #[test]
+ fn test_chacha_stream_adapters() {
+ // Check that ChaChaPolyReadAdapter and ChaChaPolyWriteAdapter correctly encode and decode an
+ // encrypted object.
+ macro_rules! check_object_read_write {
+ ($obj: expr) => {
+ // First, serialize the object, encrypted with ChaCha20Poly1305.
+ let rho = [42; 32];
+ // Total stream length is the serialized object plus the 16-byte tag.
+ let writeable_len = $obj.serialized_length() as u64 + 16;
+ let write_adapter = ChaChaPolyWriteAdapter::new(rho, &$obj);
+ let encrypted_writeable_bytes = write_adapter.encode();
+ let encrypted_writeable = &encrypted_writeable_bytes[..];
+
+ // Now deserialize the object back and make sure it matches the original.
+ let mut rd = FixedLengthReader::new(encrypted_writeable, writeable_len);
+ let read_adapter = <ChaChaPolyReadAdapter<TestWriteable>>::read(&mut rd, rho).unwrap();
+ assert_eq!($obj, read_adapter.readable);
+ };
+ }
+
+ // Try a big object that will require multiple write buffers.
+ let big_writeable = TestWriteable {
+ field1: vec![43],
+ field2: vec![44; 4192],
+ field3: vec![45; 4192 + 1],
+ };
+ check_object_read_write!(big_writeable);
+
+ // Try a small object that fits into one write buffer.
+ let small_writeable = TestWriteable {
+ field1: vec![43],
+ field2: vec![44],
+ field3: vec![45],
+ };
+ check_object_read_write!(small_writeable);
+ }
+
+ fn do_chacha_stream_adapters_ser_macros() -> Result<(), DecodeError> {
+ let writeable = TestWriteable {
+ field1: vec![43],
+ field2: vec![44; 4192],
+ field3: vec![45; 4192 + 1],
+ };
+
+ // First, serialize the object into a TLV stream, encrypted with ChaCha20Poly1305.
+ let rho = [42; 32];
+ let write_adapter = ChaChaPolyWriteAdapter::new(rho, &writeable);
+ let mut writer = ser::VecWriter(Vec::new());
+ encode_tlv_stream!(&mut writer, {
+ (1, write_adapter, required),
+ });
+
+ // Now deserialize the object back and make sure it matches the original.
+ let mut read_adapter: Option<ChaChaPolyReadAdapter<TestWriteable>> = None;
+ decode_tlv_stream!(&writer.0[..], {
+ (1, read_adapter, (option: LengthReadableArgs, rho)),
+ });
+ assert_eq!(writeable, read_adapter.unwrap().readable);
+
+ Ok(())
+ }
+
+ #[test]
+ fn chacha_stream_adapters_ser_macros() {
+ // Test that our stream adapters work as expected with the TLV macros.
+ // This also serves to test the `option: $trait` variant of the `_decode_tlv` ser macro.
+ do_chacha_stream_adapters_ser_macros().unwrap()
+ }
+}
--- /dev/null
+use bitcoin::hashes::{Hash, HashEngine};
+use bitcoin::hashes::hmac::{Hmac, HmacEngine};
+use bitcoin::hashes::sha256::Hash as Sha256;
+use bitcoin::secp256k1::{Message, Secp256k1, SecretKey, ecdsa::Signature, Signing};
+
+use crate::sign::EntropySource;
+
+use core::ops::Deref;
+
+// HKDF (RFC 5869) instantiated with HMAC-SHA256, specialized for our fixed uses: extract a
+// pseudorandom key from (salt, ikm), then expand it into 32-byte output keys where each
+// T(n) = HMAC(prk, T(n-1) || n) with a single-byte counter and no info string.
+macro_rules! hkdf_extract_expand {
+ ($salt: expr, $ikm: expr) => {{
+ // HKDF-Extract: prk = HMAC(salt, ikm)
+ let mut hmac = HmacEngine::<Sha256>::new($salt);
+ hmac.input($ikm);
+ let prk = Hmac::from_engine(hmac).to_byte_array();
+ // T(1) = HMAC(prk, 0x01)
+ let mut hmac = HmacEngine::<Sha256>::new(&prk[..]);
+ hmac.input(&[1; 1]);
+ let t1 = Hmac::from_engine(hmac).to_byte_array();
+ // T(2) = HMAC(prk, T(1) || 0x02); the prk is also returned for further expansion.
+ let mut hmac = HmacEngine::<Sha256>::new(&prk[..]);
+ hmac.input(&t1);
+ hmac.input(&[2; 1]);
+ (t1, Hmac::from_engine(hmac).to_byte_array(), prk)
+ }};
+ ($salt: expr, $ikm: expr, 2) => {{
+ let (k1, k2, _) = hkdf_extract_expand!($salt, $ikm);
+ (k1, k2)
+ }};
+ ($salt: expr, $ikm: expr, 5) => {{
+ let (k1, k2, prk) = hkdf_extract_expand!($salt, $ikm);
+
+ // T(3) = HMAC(prk, T(2) || 0x03)
+ let mut hmac = HmacEngine::<Sha256>::new(&prk[..]);
+ hmac.input(&k2);
+ hmac.input(&[3; 1]);
+ let k3 = Hmac::from_engine(hmac).to_byte_array();
+
+ // T(4) = HMAC(prk, T(3) || 0x04)
+ let mut hmac = HmacEngine::<Sha256>::new(&prk[..]);
+ hmac.input(&k3);
+ hmac.input(&[4; 1]);
+ let k4 = Hmac::from_engine(hmac).to_byte_array();
+
+ // T(5) = HMAC(prk, T(4) || 0x05)
+ let mut hmac = HmacEngine::<Sha256>::new(&prk[..]);
+ hmac.input(&k4);
+ hmac.input(&[5; 1]);
+ let k5 = Hmac::from_engine(hmac).to_byte_array();
+
+ (k1, k2, k3, k4, k5)
+ }}
+}
+
+/// HKDF-SHA256 extract-then-expand over (`salt`, `ikm`), returning the first two 32-byte
+/// output keys.
+pub fn hkdf_extract_expand_twice(salt: &[u8], ikm: &[u8]) -> ([u8; 32], [u8; 32]) {
+ hkdf_extract_expand!(salt, ikm, 2)
+}
+
+/// HKDF-SHA256 extract-then-expand over (`salt`, `ikm`), returning the first five 32-byte
+/// output keys.
+pub fn hkdf_extract_expand_5x(salt: &[u8], ikm: &[u8]) -> ([u8; 32], [u8; 32], [u8; 32], [u8; 32], [u8; 32]) {
+ hkdf_extract_expand!(salt, ikm, 5)
+}
+
+/// Produces an ECDSA signature over `msg` with `sk`. With the `grind_signatures` feature the
+/// signature is ground to have a low r value (via `sign_ecdsa_low_r`); otherwise a standard
+/// deterministic signature is produced.
+#[inline]
+pub fn sign<C: Signing>(ctx: &Secp256k1<C>, msg: &Message, sk: &SecretKey) -> Signature {
+ #[cfg(feature = "grind_signatures")]
+ let sig = ctx.sign_ecdsa_low_r(msg, sk);
+ #[cfg(not(feature = "grind_signatures"))]
+ let sig = ctx.sign_ecdsa(msg, sk);
+ sig
+}
+
+/// Produces an ECDSA signature over `msg` with `sk`, mixing fresh bytes from `entropy_source`
+/// into the nonce. With `grind_signatures`, re-signs until the compact serialization's first
+/// byte is below 0x80 (a low r value). With `_test_vectors` (and no grinding) it falls back
+/// to the deterministic `sign` — presumably to keep test vectors stable; confirm if relied on.
+#[inline]
+#[allow(unused_variables)]
+pub fn sign_with_aux_rand<C: Signing, ES: Deref>(
+ ctx: &Secp256k1<C>, msg: &Message, sk: &SecretKey, entropy_source: &ES
+) -> Signature where ES::Target: EntropySource {
+ #[cfg(feature = "grind_signatures")]
+ let sig = loop {
+ let sig = ctx.sign_ecdsa_with_noncedata(msg, sk, &entropy_source.get_secure_random_bytes());
+ if sig.serialize_compact()[0] < 0x80 {
+ break sig;
+ }
+ };
+ #[cfg(all(not(feature = "grind_signatures"), not(feature = "_test_vectors")))]
+ let sig = ctx.sign_ecdsa_with_noncedata(msg, sk, &entropy_source.get_secure_random_bytes());
+ #[cfg(all(not(feature = "grind_signatures"), feature = "_test_vectors"))]
+ let sig = sign(ctx, msg, sk);
+ sig
+}
use crate::ln::features::ChannelTypeFeatures;
use crate::ln::msgs;
use crate::ln::{ChannelId, PaymentPreimage, PaymentHash, PaymentSecret};
+use crate::chain::transaction;
use crate::routing::gossip::NetworkUpdate;
use crate::util::errors::APIError;
use crate::util::ser::{BigSize, FixedLengthReader, Writeable, Writer, MaybeReadable, Readable, RequiredWrapper, UpgradableRequired, WithoutLength};
/// replies. Handlers should connect to the node otherwise any buffered messages may be lost.
///
/// [`OnionMessage`]: msgs::OnionMessage
- /// [`MessageRouter`]: crate::onion_message::MessageRouter
- /// [`Destination`]: crate::onion_message::Destination
+ /// [`MessageRouter`]: crate::onion_message::messenger::MessageRouter
+ /// [`Destination`]: crate::onion_message::messenger::Destination
/// [`OnionMessageHandler`]: crate::ln::msgs::OnionMessageHandler
ConnectionNeeded {
/// The node id for the node needing a connection.
///
/// [`ChannelManager::accept_inbound_channel`]: crate::ln::channelmanager::ChannelManager::accept_inbound_channel
/// [`UserConfig::manually_accept_inbound_channels`]: crate::util::config::UserConfig::manually_accept_inbound_channels
- ChannelClosed {
+ ChannelClosed {
/// The `channel_id` of the channel which has been closed. Note that on-chain transactions
/// resolving the channel are likely still awaiting confirmation.
channel_id: ChannelId,
///
/// This field will be `None` for objects serialized prior to LDK 0.0.117.
channel_capacity_sats: Option<u64>,
+ /// The original channel funding TXO; this helps checking for the existence and confirmation
+ /// status of the closing tx.
+ /// Note that for instances serialized in v0.0.119 or prior this will be missing (None).
+ channel_funding_txo: Option<transaction::OutPoint>,
},
/// Used to indicate to the user that they can abandon the funding transaction and recycle the
/// inputs for another purpose.
});
},
&Event::ChannelClosed { ref channel_id, ref user_channel_id, ref reason,
- ref counterparty_node_id, ref channel_capacity_sats
+ ref counterparty_node_id, ref channel_capacity_sats, ref channel_funding_txo
} => {
9u8.write(writer)?;
// `user_channel_id` used to be a single u64 value. In order to remain backwards
(3, user_channel_id_high, required),
(5, counterparty_node_id, option),
(7, channel_capacity_sats, option),
+ (9, channel_funding_txo, option),
});
},
&Event::DiscardFunding { ref channel_id, ref transaction } => {
let mut user_channel_id_high_opt: Option<u64> = None;
let mut counterparty_node_id = None;
let mut channel_capacity_sats = None;
+ let mut channel_funding_txo = None;
read_tlv_fields!(reader, {
(0, channel_id, required),
(1, user_channel_id_low_opt, option),
(3, user_channel_id_high_opt, option),
(5, counterparty_node_id, option),
(7, channel_capacity_sats, option),
+ (9, channel_funding_txo, option),
});
// `user_channel_id` used to be a single u64 value. In order to remain
((user_channel_id_high_opt.unwrap_or(0) as u128) << 64);
Ok(Some(Event::ChannelClosed { channel_id, user_channel_id, reason: _init_tlv_based_struct_field!(reason, upgradable_required),
- counterparty_node_id, channel_capacity_sats }))
+ counterparty_node_id, channel_capacity_sats, channel_funding_txo }))
};
f()
},
pub mod blinded_path;
pub mod events;
+pub(crate) mod crypto;
+
#[cfg(feature = "std")]
/// Re-export of either `core2::io` or `std::io`, depending on the `std` feature flag.
pub use std::io;
use bitcoin::secp256k1::{PublicKey, Secp256k1, SecretKey};
use crate::blinded_path::BlindedPath;
use crate::blinded_path::payment::{ForwardNode, ForwardTlvs, PaymentConstraints, PaymentRelay, ReceiveTlvs};
-use crate::events::{HTLCDestination, MessageSendEvent, MessageSendEventsProvider};
+use crate::events::{Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider, PaymentFailureReason};
use crate::ln::PaymentSecret;
use crate::ln::channelmanager;
use crate::ln::channelmanager::{PaymentId, RecipientOnionFields};
use crate::ln::onion_utils;
use crate::ln::onion_utils::INVALID_ONION_BLINDING;
use crate::ln::outbound_payment::Retry;
+use crate::offers::invoice::BlindedPayInfo;
use crate::prelude::*;
use crate::routing::router::{Payee, PaymentParameters, RouteParameters};
use crate::util::config::UserConfig;
use crate::util::test_utils;
-pub fn get_blinded_route_parameters(
- amt_msat: u64, payment_secret: PaymentSecret, node_ids: Vec<PublicKey>,
+fn blinded_payment_path(
+ payment_secret: PaymentSecret, node_ids: Vec<PublicKey>,
channel_upds: &[&msgs::UnsignedChannelUpdate], keys_manager: &test_utils::TestKeysInterface
-) -> RouteParameters {
+) -> (BlindedPayInfo, BlindedPath) {
let mut intermediate_nodes = Vec::new();
for (node_id, chan_upd) in node_ids.iter().zip(channel_upds) {
intermediate_nodes.push(ForwardNode {
},
};
let mut secp_ctx = Secp256k1::new();
- let blinded_path = BlindedPath::new_for_payment(
+ BlindedPath::new_for_payment(
&intermediate_nodes[..], *node_ids.last().unwrap(), payee_tlvs,
channel_upds.last().unwrap().htlc_maximum_msat, keys_manager, &secp_ctx
- ).unwrap();
+ ).unwrap()
+}
+pub fn get_blinded_route_parameters(
+ amt_msat: u64, payment_secret: PaymentSecret, node_ids: Vec<PublicKey>,
+ channel_upds: &[&msgs::UnsignedChannelUpdate], keys_manager: &test_utils::TestKeysInterface
+) -> RouteParameters {
RouteParameters::from_payment_params_and_value(
- PaymentParameters::blinded(vec![blinded_path]), amt_msat
+ PaymentParameters::blinded(vec![
+ blinded_payment_path(payment_secret, node_ids, channel_upds, keys_manager)
+ ]), amt_msat
)
}
claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage);
}
+// Test a successful payment over a blinded path with two blinded hops beyond the
+// introduction node: nodes[2] (intro) -> nodes[3] -> nodes[4] (recipient), sent by nodes[0].
+#[test]
+fn three_hop_blinded_path_success() {
+ let chanmon_cfgs = create_chanmon_cfgs(5);
+ let node_cfgs = create_node_cfgs(5, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(5, &node_cfgs, &[None, None, None, None, None]);
+ let mut nodes = create_network(5, &node_cfgs, &node_chanmgrs);
+ // Linear topology 0 -> 1 -> 2 -> 3 -> 4; the blinded portion covers 2 -> 3 -> 4.
+ create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 0);
+ create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 1_000_000, 0);
+ let chan_upd_2_3 = create_announced_chan_between_nodes_with_value(&nodes, 2, 3, 1_000_000, 0).0.contents;
+ let chan_upd_3_4 = create_announced_chan_between_nodes_with_value(&nodes, 3, 4, 1_000_000, 0).0.contents;
+
+ let amt_msat = 5000;
+ let (payment_preimage, payment_hash, payment_secret) = get_payment_preimage_hash(&nodes[4], Some(amt_msat), None);
+ // Route params whose single blinded path covers nodes[2..], keyed to the recipient's
+ // (nodes[4]'s) keys manager.
+ let route_params = get_blinded_route_parameters(amt_msat, payment_secret,
+ nodes.iter().skip(2).map(|n| n.node.get_our_node_id()).collect(),
+ &[&chan_upd_2_3, &chan_upd_3_4], &chanmon_cfgs[4].keys_manager);
+
+ nodes[0].node.send_payment(payment_hash, RecipientOnionFields::spontaneous_empty(), PaymentId(payment_hash.0), route_params, Retry::Attempts(0)).unwrap();
+ check_added_monitors(&nodes[0], 1);
+ pass_along_route(&nodes[0], &[&[&nodes[1], &nodes[2], &nodes[3], &nodes[4]]], amt_msat, payment_hash, payment_secret);
+ claim_payment(&nodes[0], &[&nodes[1], &nodes[2], &nodes[3], &nodes[4]], payment_preimage);
+}
+
#[derive(PartialEq)]
enum ReceiveCheckFail {
// The recipient fails the payment upon `PaymentClaimable`.
};
let amt_msat = 5000;
- let final_cltv_delta = if check == ReceiveCheckFail::ProcessPendingHTLCsCheck {
+ let excess_final_cltv_delta_opt = if check == ReceiveCheckFail::ProcessPendingHTLCsCheck {
// Set the final CLTV expiry too low to trigger the failure in process_pending_htlc_forwards.
Some(TEST_FINAL_CLTV as u16 - 2)
} else { None };
- let (_, payment_hash, payment_secret) = get_payment_preimage_hash(&nodes[2], Some(amt_msat), final_cltv_delta);
+ let (_, payment_hash, payment_secret) = get_payment_preimage_hash(&nodes[2], Some(amt_msat), excess_final_cltv_delta_opt);
let mut route_params = get_blinded_route_parameters(amt_msat, payment_secret,
nodes.iter().skip(1).map(|n| n.node.get_our_node_id()).collect(), &[&chan_upd_1_2],
&chanmon_cfgs[2].keys_manager);
let route = if check == ReceiveCheckFail::ProcessPendingHTLCsCheck {
let mut route = get_route(&nodes[0], &route_params).unwrap();
// Set the final CLTV expiry too low to trigger the failure in process_pending_htlc_forwards.
- route.paths[0].blinded_tail.as_mut().map(|bt| bt.excess_final_cltv_expiry_delta = TEST_FINAL_CLTV - 2);
+ route.paths[0].hops.last_mut().map(|h| h.cltv_expiry_delta += excess_final_cltv_delta_opt.unwrap() as u32);
+ route.paths[0].blinded_tail.as_mut().map(|bt| bt.excess_final_cltv_expiry_delta = excess_final_cltv_delta_opt.unwrap() as u32);
route
} else if check == ReceiveCheckFail::PaymentConstraints {
// Create a blinded path where the receiver's encrypted payload has an htlc_minimum_msat that is
commitment_signed_dance!(nodes[2], nodes[1], (), false, true, false, false);
},
ReceiveCheckFail::ProcessPendingHTLCsCheck => {
+ assert_eq!(payment_event_1_2.msgs[0].cltv_expiry, nodes[0].best_block_info().1 + 1 + excess_final_cltv_delta_opt.unwrap() as u32);
nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event_1_2.msgs[0]);
check_added_monitors!(nodes[2], 0);
do_commitment_signed_dance(&nodes[2], &nodes[1], &payment_event_1_2.commitment_msg, true, true);
expect_payment_failed_conditions(&nodes[0], payment_hash, false,
PaymentFailedConditions::new().expected_htlc_error_data(INVALID_ONION_BLINDING, &[0; 32]));
}
+
+// Test that when a payment fails over one blinded path, the payer retries over an
+// alternative blinded path, and that the payment fails with `RouteNotFound` once all
+// provided blinded paths have been tried, even with retry attempts remaining.
+#[test]
+fn blinded_path_retries() {
+ let chanmon_cfgs = create_chanmon_cfgs(4);
+ // Make one blinded path's fees slightly higher so they are tried in a deterministic order.
+ let mut higher_fee_chan_cfg = test_default_channel_config();
+ higher_fee_chan_cfg.channel_config.forwarding_fee_base_msat += 1;
+ let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, Some(higher_fee_chan_cfg), None]);
+ let mut nodes = create_network(4, &node_cfgs, &node_chanmgrs);
+
+ // Create this network topology so nodes[0] has a blinded route hint to retry over.
+ // n1
+ // / \
+ // n0 n3
+ // \ /
+ // n2
+ create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 0);
+ create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 1_000_000, 0);
+ let chan_1_3 = create_announced_chan_between_nodes_with_value(&nodes, 1, 3, 1_000_000, 0);
+ let chan_2_3 = create_announced_chan_between_nodes_with_value(&nodes, 2, 3, 1_000_000, 0);
+
+ let amt_msat = 5000;
+ let (_, payment_hash, payment_secret) = get_payment_preimage_hash(&nodes[3], Some(amt_msat), None);
+ // Two one-intermediate-hop blinded paths to nodes[3]: via nodes[1] and via nodes[2].
+ let route_params = {
+ let pay_params = PaymentParameters::blinded(
+ vec![
+ blinded_payment_path(payment_secret,
+ vec![nodes[1].node.get_our_node_id(), nodes[3].node.get_our_node_id()], &[&chan_1_3.0.contents],
+ &chanmon_cfgs[3].keys_manager
+ ),
+ blinded_payment_path(payment_secret,
+ vec![nodes[2].node.get_our_node_id(), nodes[3].node.get_our_node_id()], &[&chan_2_3.0.contents],
+ &chanmon_cfgs[3].keys_manager
+ ),
+ ]
+ )
+ .with_bolt12_features(channelmanager::provided_bolt12_invoice_features(&UserConfig::default()))
+ .unwrap();
+ RouteParameters::from_payment_params_and_value(pay_params, amt_msat)
+ };
+
+ nodes[0].node.send_payment(payment_hash, RecipientOnionFields::spontaneous_empty(), PaymentId(payment_hash.0), route_params.clone(), Retry::Attempts(2)).unwrap();
+ check_added_monitors(&nodes[0], 1);
+ pass_along_route(&nodes[0], &[&[&nodes[1], &nodes[3]]], amt_msat, payment_hash, payment_secret);
+
+ // Fails the payment back from nodes[3] via the given introduction node, checking that the
+ // blinded-path failure surfaces as update_fail_malformed_htlc with INVALID_ONION_BLINDING
+ // and an all-zero sha256_of_onion, and that nodes[0] sees a non-permanent
+ // PaymentPathFailed followed by a PendingHTLCsForwardable (retry) event.
+ macro_rules! fail_payment_back {
+ ($intro_node: expr) => {
+ nodes[3].node.fail_htlc_backwards(&payment_hash);
+ expect_pending_htlcs_forwardable_conditions(
+ nodes[3].node.get_and_clear_pending_events(), &[HTLCDestination::FailedPayment { payment_hash }]
+ );
+ nodes[3].node.process_pending_htlc_forwards();
+ check_added_monitors!(nodes[3], 1);
+
+ let updates = get_htlc_update_msgs!(nodes[3], $intro_node.node.get_our_node_id());
+ assert_eq!(updates.update_fail_malformed_htlcs.len(), 1);
+ let update_malformed = &updates.update_fail_malformed_htlcs[0];
+ assert_eq!(update_malformed.sha256_of_onion, [0; 32]);
+ assert_eq!(update_malformed.failure_code, INVALID_ONION_BLINDING);
+ $intro_node.node.handle_update_fail_malformed_htlc(&nodes[3].node.get_our_node_id(), update_malformed);
+ do_commitment_signed_dance(&$intro_node, &nodes[3], &updates.commitment_signed, true, false);
+
+ let updates = get_htlc_update_msgs!($intro_node, nodes[0].node.get_our_node_id());
+ assert_eq!(updates.update_fail_htlcs.len(), 1);
+ nodes[0].node.handle_update_fail_htlc(&$intro_node.node.get_our_node_id(), &updates.update_fail_htlcs[0]);
+ do_commitment_signed_dance(&nodes[0], &$intro_node, &updates.commitment_signed, false, false);
+
+ let mut events = nodes[0].node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 2);
+ match events[0] {
+ Event::PaymentPathFailed { payment_hash: ev_payment_hash, payment_failed_permanently, .. } => {
+ assert_eq!(payment_hash, ev_payment_hash);
+ assert_eq!(payment_failed_permanently, false);
+ },
+ _ => panic!("Unexpected event"),
+ }
+ match events[1] {
+ Event::PendingHTLCsForwardable { .. } => {},
+ _ => panic!("Unexpected event"),
+ }
+ nodes[0].node.process_pending_htlc_forwards();
+ }
+ }
+
+ // Fail over the first blinded path (via nodes[1]).
+ fail_payment_back!(nodes[1]);
+
+ // Pass the retry along.
+ check_added_monitors!(nodes[0], 1);
+ let mut msg_events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(msg_events.len(), 1);
+ pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], amt_msat, payment_hash, Some(payment_secret), msg_events.pop().unwrap(), true, None);
+
+ // Fail over the second blinded path (via nodes[2]) as well; no paths remain to retry on.
+ fail_payment_back!(nodes[2]);
+ let evs = nodes[0].node.get_and_clear_pending_events();
+ assert_eq!(evs.len(), 1);
+ match evs[0] {
+ Event::PaymentFailed { payment_hash: ev_payment_hash, reason, .. } => {
+ assert_eq!(ev_payment_hash, payment_hash);
+ // We have 1 retry attempt remaining, but we're out of blinded paths to try.
+ assert_eq!(reason, Some(PaymentFailureReason::RouteNotFound));
+ },
+ _ => panic!()
+ }
+}
use core::ops::Deref;
use crate::chain;
use crate::ln::features::ChannelTypeFeatures;
-use crate::util::crypto::{sign, sign_with_aux_rand};
+use crate::crypto::utils::{sign, sign_with_aux_rand};
use super::channel_keys::{DelayedPaymentBasepoint, DelayedPaymentKey, HtlcKey, HtlcBasepoint, RevocationKey, RevocationBasepoint};
/// Maximum number of one-way in-flight HTLC (protocol-level value).
use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose, ClosureReason, HTLCDestination};
use crate::ln::channelmanager::{RAACommitmentOrder, PaymentSendFailure, PaymentId, RecipientOnionFields};
use crate::ln::channel::{AnnouncementSigsState, ChannelPhase};
-use crate::ln::msgs;
+use crate::ln::{msgs, ChannelId};
use crate::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler};
use crate::util::test_channel_signer::TestChannelSigner;
use crate::util::errors::APIError;
chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
- let channel_id = OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index }.to_channel_id();
+ let channel_id = ChannelId::v1_from_funding_outpoint(OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index });
nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg);
check_added_monitors!(nodes[1], 1);
pub(crate) struct ShutdownResult {
pub(crate) closure_reason: ClosureReason,
/// A channel monitor update to apply.
- pub(crate) monitor_update: Option<(PublicKey, OutPoint, ChannelMonitorUpdate)>,
+ pub(crate) monitor_update: Option<(PublicKey, OutPoint, ChannelId, ChannelMonitorUpdate)>,
/// A list of dropped outbound HTLCs that can safely be failed backwards immediately.
pub(crate) dropped_outbound_htlcs: Vec<(HTLCSource, PaymentHash, PublicKey, ChannelId)>,
/// An unbroadcasted batch funding transaction id. The closure of this channel should be
pub(crate) channel_capacity_satoshis: u64,
pub(crate) counterparty_node_id: PublicKey,
pub(crate) unbroadcasted_funding_tx: Option<Transaction>,
+ pub(crate) channel_funding_txo: Option<OutPoint>,
}
/// If the majority of the channels funds are to the fundee and the initiator holds only just
// See test_duplicate_chan_id and test_pre_lockin_no_chan_closed_update for more.
if !self.channel_state.is_pre_funded_state() {
self.latest_monitor_update_id = CLOSED_CHANNEL_UPDATE_ID;
- Some((self.get_counterparty_node_id(), funding_txo, ChannelMonitorUpdate {
+ Some((self.get_counterparty_node_id(), funding_txo, self.channel_id(), ChannelMonitorUpdate {
update_id: self.latest_monitor_update_id,
counterparty_node_id: Some(self.counterparty_node_id),
updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast }],
+ channel_id: Some(self.channel_id()),
}))
} else { None }
} else { None };
channel_capacity_satoshis: self.channel_value_satoshis,
counterparty_node_id: self.counterparty_node_id,
unbroadcasted_funding_tx,
+ channel_funding_txo: self.get_funding_txo(),
}
}
updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
payment_preimage: payment_preimage_arg.clone(),
}],
+ channel_id: Some(self.context.channel_id()),
};
if !self.context.channel_state.can_generate_new_commitment() {
htlc_outputs: htlcs_and_sigs,
claimed_htlcs,
nondust_htlc_sources,
- }]
+ }],
+ channel_id: Some(self.context.channel_id()),
};
self.context.cur_holder_commitment_transaction_number -= 1;
update_id: self.context.latest_monitor_update_id + 1, // We don't increment this yet!
counterparty_node_id: Some(self.context.counterparty_node_id),
updates: Vec::new(),
+ channel_id: Some(self.context.channel_id()),
};
let mut htlc_updates = Vec::new();
idx: self.context.cur_counterparty_commitment_transaction_number + 1,
secret: msg.per_commitment_secret,
}],
+ channel_id: Some(self.context.channel_id()),
};
// Update state now that we've passed all the can-fail calls...
updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
scriptpubkey: self.get_closing_scriptpubkey(),
}],
+ channel_id: Some(self.context.channel_id()),
};
self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
self.push_ret_blockable_mon_update(monitor_update)
channel_capacity_satoshis: self.context.channel_value_satoshis,
counterparty_node_id: self.context.counterparty_node_id,
unbroadcasted_funding_tx: self.context.unbroadcasted_funding(),
+ channel_funding_txo: self.context.get_funding_txo(),
};
let tx = self.build_signed_closing_transaction(&mut closing_tx, &msg.signature, &sig);
self.context.channel_state = ChannelState::ShutdownComplete;
channel_capacity_satoshis: self.context.channel_value_satoshis,
counterparty_node_id: self.context.counterparty_node_id,
unbroadcasted_funding_tx: self.context.unbroadcasted_funding(),
+ channel_funding_txo: self.context.get_funding_txo(),
};
self.context.channel_state = ChannelState::ShutdownComplete;
self.context.update_time_counter += 1;
feerate_per_kw: Some(counterparty_commitment_tx.feerate_per_kw()),
to_broadcaster_value_sat: Some(counterparty_commitment_tx.to_broadcaster_value_sat()),
to_countersignatory_value_sat: Some(counterparty_commitment_tx.to_countersignatory_value_sat()),
- }]
+ }],
+ channel_id: Some(self.context.channel_id()),
};
self.context.channel_state.set_awaiting_remote_revoke();
monitor_update
updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
scriptpubkey: self.get_closing_scriptpubkey(),
}],
+ channel_id: Some(self.context.channel_id()),
};
self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
self.push_ret_blockable_mon_update(monitor_update)
// Now that we're past error-generating stuff, update our local state:
self.context.channel_state = ChannelState::FundingNegotiated;
- self.context.channel_id = funding_txo.to_channel_id();
+ self.context.channel_id = ChannelId::v1_from_funding_outpoint(funding_txo);
// If the funding transaction is a coinbase transaction, we need to set the minimum depth to 100.
// We can skip this if it is a zero-conf channel.
&self.context.channel_transaction_parameters,
funding_redeemscript.clone(), self.context.channel_value_satoshis,
obscure_factor,
- holder_commitment_tx, best_block, self.context.counterparty_node_id);
+ holder_commitment_tx, best_block, self.context.counterparty_node_id, self.context.channel_id());
channel_monitor.provide_initial_counterparty_commitment_tx(
counterparty_initial_bitcoin_tx.txid, Vec::new(),
self.context.cur_counterparty_commitment_transaction_number,
// Now that we're past error-generating stuff, update our local state:
self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new());
- self.context.channel_id = funding_txo.to_channel_id();
+ self.context.channel_id = ChannelId::v1_from_funding_outpoint(funding_txo);
self.context.cur_counterparty_commitment_transaction_number -= 1;
self.context.cur_holder_commitment_transaction_number -= 1;
&self.context.channel_transaction_parameters,
funding_redeemscript.clone(), self.context.channel_value_satoshis,
obscure_factor,
- holder_commitment_tx, best_block, self.context.counterparty_node_id);
+ holder_commitment_tx, best_block, self.context.counterparty_node_id, self.context.channel_id());
channel_monitor.provide_initial_counterparty_commitment_tx(
counterparty_initial_commitment_tx.trust().txid(), Vec::new(),
self.context.cur_counterparty_commitment_transaction_number + 1,
//! ChannelId definition.
+use crate::chain::transaction::OutPoint;
+use crate::io;
use crate::ln::msgs::DecodeError;
use crate::sign::EntropySource;
use crate::util::ser::{Readable, Writeable, Writer};
-use crate::io;
+use bitcoin::hashes::Hash as _;
use core::fmt;
use core::ops::Deref;
Self(res)
}
+ /// Create _v1_ channel ID from a funding tx outpoint
+ ///
+ /// The v1 channel ID is derived from the funding transaction's txid bytes and the funding
+ /// output index (see [`Self::v1_from_funding_txid`]).
+ pub fn v1_from_funding_outpoint(outpoint: OutPoint) -> Self {
+ Self::v1_from_funding_txid(outpoint.txid.as_byte_array(), outpoint.index)
+ }
+
/// Create a _temporary_ channel ID randomly, based on an entropy source.
pub fn temporary_from_entropy_source<ES: Deref>(entropy_source: &ES) -> Self
where ES::Target: EntropySource {
#[cfg(any(feature = "_test_utils", test))]
use crate::ln::features::Bolt11InvoiceFeatures;
use crate::routing::router::{BlindedTail, InFlightHtlcs, Path, Payee, PaymentParameters, Route, RouteParameters, Router};
-use crate::ln::onion_payment::{check_incoming_htlc_cltv, create_recv_pending_htlc_info, create_fwd_pending_htlc_info, decode_incoming_update_add_htlc_onion, InboundOnionErr, NextPacketDetails};
+use crate::ln::onion_payment::{check_incoming_htlc_cltv, create_recv_pending_htlc_info, create_fwd_pending_htlc_info, decode_incoming_update_add_htlc_onion, InboundHTLCErr, NextPacketDetails};
use crate::ln::msgs;
use crate::ln::onion_utils;
use crate::ln::onion_utils::{HTLCFailReason, INVALID_ONION_BLINDING};
use crate::offers::offer::{DerivedMetadata, Offer, OfferBuilder};
use crate::offers::parse::Bolt12SemanticError;
use crate::offers::refund::{Refund, RefundBuilder};
-use crate::onion_message::{Destination, MessageRouter, OffersMessage, OffersMessageHandler, PendingOnionMessage, new_pending_onion_message};
+use crate::onion_message::messenger::{Destination, MessageRouter, PendingOnionMessage, new_pending_onion_message};
+use crate::onion_message::offers::{OffersMessage, OffersMessageHandler};
use crate::sign::{EntropySource, NodeSigner, Recipient, SignerProvider};
use crate::sign::ecdsa::WriteableEcdsaChannelSigner;
use crate::util::config::{UserConfig, ChannelConfig, ChannelConfigUpdate};
/// onion payload if we're the introduction node. Useful for calculating the next hop's
/// [`msgs::UpdateAddHTLC::blinding_point`].
pub inbound_blinding_point: PublicKey,
- // Another field will be added here when we support forwarding as a non-intro node.
+ /// If needed, this determines how this HTLC should be failed backwards, based on whether we are
+ /// the introduction node.
+ pub failure: BlindedFailure,
}
impl PendingHTLCRouting {
// Used to override the onion failure code and data if the HTLC is blinded.
fn blinded_failure(&self) -> Option<BlindedFailure> {
- // TODO: needs update when we support forwarding blinded HTLCs as non-intro node
match self {
- Self::Forward { blinded: Some(_), .. } => Some(BlindedFailure::FromIntroductionNode),
+ Self::Forward { blinded: Some(BlindedForward { failure, .. }), .. } => Some(*failure),
Self::Receive { requires_blinded_error: true, .. } => Some(BlindedFailure::FromBlindedNode),
_ => None,
}
// Note that this may be an outbound SCID alias for the associated channel.
prev_short_channel_id: u64,
prev_htlc_id: u64,
+ prev_channel_id: ChannelId,
prev_funding_outpoint: OutPoint,
prev_user_channel_id: u128,
}
},
}
-// Used for failing blinded HTLCs backwards correctly.
+/// Whether this blinded HTLC is being failed backwards by the introduction node or a blinded node,
+/// which determines the failure message that should be used.
#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)]
-enum BlindedFailure {
+pub enum BlindedFailure {
+ /// This HTLC is being failed backwards by the introduction node, and thus should be failed with
+ /// [`msgs::UpdateFailHTLC`] and error code `0x8000|0x4000|24`.
FromIntroductionNode,
+ /// This HTLC is being failed backwards by a blinded node within the path, and thus should be
+ /// failed with [`msgs::UpdateFailMalformedHTLC`] and error code `0x8000|0x4000|24`.
FromBlindedNode,
}
incoming_packet_shared_secret: [u8; 32],
phantom_shared_secret: Option<[u8; 32]>,
blinded_failure: Option<BlindedFailure>,
+ channel_id: ChannelId,
// This field is consumed by `claim_funds_from_hop()` when updating a force-closed backwards
// channel with a preimage provided by the forward channel.
impl From<&ClaimableHTLC> for events::ClaimedHTLC {
fn from(val: &ClaimableHTLC) -> Self {
events::ClaimedHTLC {
- channel_id: val.prev_hop.outpoint.to_channel_id(),
+ channel_id: val.prev_hop.channel_id,
user_channel_id: val.prev_hop.user_channel_id.unwrap_or(0),
cltv_expiry: val.cltv_expiry,
value_msat: val.value,
///
/// Note that any such events are lost on shutdown, so in general they must be updates which
/// are regenerated on startup.
- ClosedMonitorUpdateRegeneratedOnStartup((OutPoint, ChannelMonitorUpdate)),
+ ClosedMonitorUpdateRegeneratedOnStartup((OutPoint, ChannelId, ChannelMonitorUpdate)),
/// Handle a ChannelMonitorUpdate which may or may not close the channel and may unblock the
/// channel to continue normal operation.
///
MonitorUpdateRegeneratedOnStartup {
counterparty_node_id: PublicKey,
funding_txo: OutPoint,
+ channel_id: ChannelId,
update: ChannelMonitorUpdate
},
/// Some [`ChannelMonitorUpdate`] (s) completed before we were serialized but we still have
/// outbound edge.
EmitEventAndFreeOtherChannel {
event: events::Event,
- downstream_counterparty_and_funding_outpoint: Option<(PublicKey, OutPoint, RAAMonitorUpdateBlockingAction)>,
+ downstream_counterparty_and_funding_outpoint: Option<(PublicKey, OutPoint, ChannelId, RAAMonitorUpdateBlockingAction)>,
},
/// Indicates we should immediately resume the operation of another channel, unless there is
/// some other reason why the channel is blocked. In practice this simply means immediately
downstream_counterparty_node_id: PublicKey,
downstream_funding_outpoint: OutPoint,
blocking_action: RAAMonitorUpdateBlockingAction,
+ downstream_channel_id: ChannelId,
},
}
(0, downstream_counterparty_node_id, required),
(2, downstream_funding_outpoint, required),
(4, blocking_action, required),
+ // Note that by the time we get past the required read above, downstream_funding_outpoint will be
+ // filled in, so we can safely unwrap it here.
+ (5, downstream_channel_id, (default_value, ChannelId::v1_from_funding_outpoint(downstream_funding_outpoint.0.unwrap()))),
},
(2, EmitEventAndFreeOtherChannel) => {
(0, event, upgradable_required),
ReleaseRAAChannelMonitorUpdate {
counterparty_node_id: PublicKey,
channel_funding_outpoint: OutPoint,
+ channel_id: ChannelId,
},
}
impl_writeable_tlv_based_enum!(EventCompletionAction,
(0, ReleaseRAAChannelMonitorUpdate) => {
(0, channel_funding_outpoint, required),
(2, counterparty_node_id, required),
+ // Note that by the time we get past the required read above, channel_funding_outpoint will be
+ // filled in, so we can safely unwrap it here.
+ (3, channel_id, (default_value, ChannelId::v1_from_funding_outpoint(channel_funding_outpoint.0.unwrap()))),
};
);
impl RAAMonitorUpdateBlockingAction {
fn from_prev_hop_data(prev_hop: &HTLCPreviousHopData) -> Self {
Self::ForwardedPaymentInboundClaim {
- channel_id: prev_hop.outpoint.to_channel_id(),
+ channel_id: prev_hop.channel_id,
htlc_id: prev_hop.htlc_id,
}
}
/// The Channel's funding transaction output, if we've negotiated the funding transaction with
/// our counterparty already.
///
- /// Note that, if this has been set, `channel_id` will be equivalent to
- /// `funding_txo.unwrap().to_channel_id()`.
+ /// Note that, if this has been set, `channel_id` for V1-established channels will be equivalent to
+ /// `ChannelId::v1_from_funding_outpoint(funding_txo.unwrap())`.
pub funding_txo: Option<OutPoint>,
/// The features which this channel operates with. See individual features for more info.
///
handle_new_monitor_update!($self, $update_res, $chan, _internal,
handle_monitor_update_completion!($self, $peer_state_lock, $peer_state, $per_peer_state_lock, $chan))
};
- ($self: ident, $funding_txo: expr, $update: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr) => { {
+ ($self: ident, $funding_txo: expr, $channel_id: expr, $update: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr) => { {
let in_flight_updates = $peer_state.in_flight_monitor_updates.entry($funding_txo)
.or_insert_with(Vec::new);
// During startup, we push monitor updates as background events through to here in
// Update the monitor with the shutdown script if necessary.
if let Some(monitor_update) = monitor_update_opt.take() {
- handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update,
+ handle_new_monitor_update!(self, funding_txo_opt.unwrap(), *channel_id, monitor_update,
peer_state_lock, peer_state, per_peer_state, chan);
}
} else {
let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id };
self.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver);
}
- if let Some((_, funding_txo, monitor_update)) = shutdown_res.monitor_update {
+ if let Some((_, funding_txo, _channel_id, monitor_update)) = shutdown_res.monitor_update {
// There isn't anything we can do if we get an update failure - we're already
// force-closing. The monitor update on the required in-memory copy should broadcast
// the latest local state, which is the best we can do anyway. Thus, it is safe to
reason: shutdown_res.closure_reason,
counterparty_node_id: Some(shutdown_res.counterparty_node_id),
channel_capacity_sats: Some(shutdown_res.channel_capacity_satoshis),
+ channel_funding_txo: shutdown_res.channel_funding_txo,
}, None));
if let Some(transaction) = shutdown_res.unbroadcasted_funding_tx {
let is_intro_node_forward = match next_hop {
onion_utils::Hop::Forward {
- // TODO: update this when we support blinded forwarding as non-intro node
- next_hop_data: msgs::InboundOnionPayload::BlindedForward { .. }, ..
+ next_hop_data: msgs::InboundOnionPayload::BlindedForward {
+ intro_node_blinding_point: Some(_), ..
+ }, ..
} => true,
_ => false,
};
// delay) once they've send us a commitment_signed!
PendingHTLCStatus::Forward(info)
},
- Err(InboundOnionErr { err_code, err_data, msg }) => return_err!(msg, err_code, &err_data)
+ Err(InboundHTLCErr { err_code, err_data, msg }) => return_err!(msg, err_code, &err_data)
}
},
onion_utils::Hop::Forward { next_hop_data, next_hop_hmac, new_packet_bytes } => {
match create_fwd_pending_htlc_info(msg, next_hop_data, next_hop_hmac,
new_packet_bytes, shared_secret, next_packet_pubkey_opt) {
Ok(info) => PendingHTLCStatus::Forward(info),
- Err(InboundOnionErr { err_code, err_data, msg }) => return_err!(msg, err_code, &err_data)
+ Err(InboundHTLCErr { err_code, err_data, msg }) => return_err!(msg, err_code, &err_data)
}
}
}
}, onion_packet, None, &self.fee_estimator, &&logger);
match break_chan_phase_entry!(self, send_res, chan_phase_entry) {
Some(monitor_update) => {
- match handle_new_monitor_update!(self, funding_txo, monitor_update, peer_state_lock, peer_state, per_peer_state, chan) {
+ match handle_new_monitor_update!(self, funding_txo, channel_id, monitor_update, peer_state_lock, peer_state, per_peer_state, chan) {
false => {
// Note that MonitorUpdateInProgress here indicates (per function
// docs) that we will resend the commitment update once monitor
}
let outpoint = OutPoint { txid: tx.txid(), index: output_index.unwrap() };
if let Some(funding_batch_state) = funding_batch_state.as_mut() {
- funding_batch_state.push((outpoint.to_channel_id(), *counterparty_node_id, false));
+ // TODO(dual_funding): We only do batch funding for V1 channels at the moment, but we'll probably
+ // need to fix this somehow to not rely on using the outpoint for the channel ID if we
+ // want to support V2 batching here as well.
+ funding_batch_state.push((ChannelId::v1_from_funding_outpoint(outpoint), *counterparty_node_id, false));
}
Ok(outpoint)
})
});
}
}
+ mem::drop(funding_batch_states);
for shutdown_result in shutdown_results.drain(..) {
self.finish_close_channel(shutdown_result);
}
let mut per_source_pending_forward = [(
payment.prev_short_channel_id,
payment.prev_funding_outpoint,
+ payment.prev_channel_id,
payment.prev_user_channel_id,
vec![(pending_htlc_info, payment.prev_htlc_id)]
)];
short_channel_id: payment.prev_short_channel_id,
user_channel_id: Some(payment.prev_user_channel_id),
outpoint: payment.prev_funding_outpoint,
+ channel_id: payment.prev_channel_id,
htlc_id: payment.prev_htlc_id,
incoming_packet_shared_secret: payment.forward_info.incoming_shared_secret,
phantom_shared_secret: None,
let mut new_events = VecDeque::new();
let mut failed_forwards = Vec::new();
- let mut phantom_receives: Vec<(u64, OutPoint, u128, Vec<(PendingHTLCInfo, u64)>)> = Vec::new();
+ let mut phantom_receives: Vec<(u64, OutPoint, ChannelId, u128, Vec<(PendingHTLCInfo, u64)>)> = Vec::new();
{
let mut forward_htlcs = HashMap::new();
mem::swap(&mut forward_htlcs, &mut self.forward_htlcs.lock().unwrap());
for forward_info in pending_forwards.drain(..) {
match forward_info {
HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
- prev_short_channel_id, prev_htlc_id, prev_funding_outpoint, prev_user_channel_id,
- forward_info: PendingHTLCInfo {
+ prev_short_channel_id, prev_htlc_id, prev_channel_id, prev_funding_outpoint,
+ prev_user_channel_id, forward_info: PendingHTLCInfo {
routing, incoming_shared_secret, payment_hash, outgoing_amt_msat,
outgoing_cltv_value, ..
}
}) => {
macro_rules! failure_handler {
($msg: expr, $err_code: expr, $err_data: expr, $phantom_ss: expr, $next_hop_unknown: expr) => {
- let logger = WithContext::from(&self.logger, forwarding_counterparty, Some(prev_funding_outpoint.to_channel_id()));
+ let logger = WithContext::from(&self.logger, forwarding_counterparty, Some(prev_channel_id));
log_info!(logger, "Failed to accept/forward incoming HTLC: {}", $msg);
let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
short_channel_id: prev_short_channel_id,
user_channel_id: Some(prev_user_channel_id),
+ channel_id: prev_channel_id,
outpoint: prev_funding_outpoint,
htlc_id: prev_htlc_id,
incoming_packet_shared_secret: incoming_shared_secret,
outgoing_cltv_value, Some(phantom_shared_secret), false, None,
current_height, self.default_configuration.accept_mpp_keysend)
{
- Ok(info) => phantom_receives.push((prev_short_channel_id, prev_funding_outpoint, prev_user_channel_id, vec![(info, prev_htlc_id)])),
- Err(InboundOnionErr { err_code, err_data, msg }) => failed_payment!(msg, err_code, err_data, Some(phantom_shared_secret))
+ Ok(info) => phantom_receives.push((prev_short_channel_id, prev_funding_outpoint, prev_channel_id, prev_user_channel_id, vec![(info, prev_htlc_id)])),
+ Err(InboundHTLCErr { err_code, err_data, msg }) => failed_payment!(msg, err_code, err_data, Some(phantom_shared_secret))
}
},
_ => panic!(),
for forward_info in pending_forwards.drain(..) {
let queue_fail_htlc_res = match forward_info {
HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
- prev_short_channel_id, prev_htlc_id, prev_funding_outpoint, prev_user_channel_id,
- forward_info: PendingHTLCInfo {
+ prev_short_channel_id, prev_htlc_id, prev_channel_id, prev_funding_outpoint,
+ prev_user_channel_id, forward_info: PendingHTLCInfo {
incoming_shared_secret, payment_hash, outgoing_amt_msat, outgoing_cltv_value,
routing: PendingHTLCRouting::Forward {
onion_packet, blinded, ..
let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
short_channel_id: prev_short_channel_id,
user_channel_id: Some(prev_user_channel_id),
+ channel_id: prev_channel_id,
outpoint: prev_funding_outpoint,
htlc_id: prev_htlc_id,
incoming_packet_shared_secret: incoming_shared_secret,
// Phantom payments are only PendingHTLCRouting::Receive.
phantom_shared_secret: None,
- blinded_failure: blinded.map(|_| BlindedFailure::FromIntroductionNode),
+ blinded_failure: blinded.map(|b| b.failure),
});
let next_blinding_point = blinded.and_then(|b| {
let encrypted_tlvs_ss = self.node_signer.ecdh(
'next_forwardable_htlc: for forward_info in pending_forwards.drain(..) {
match forward_info {
HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
- prev_short_channel_id, prev_htlc_id, prev_funding_outpoint, prev_user_channel_id,
- forward_info: PendingHTLCInfo {
+ prev_short_channel_id, prev_htlc_id, prev_channel_id, prev_funding_outpoint,
+ prev_user_channel_id, forward_info: PendingHTLCInfo {
routing, incoming_shared_secret, payment_hash, incoming_amt_msat, outgoing_amt_msat,
skimmed_fee_msat, ..
}
prev_hop: HTLCPreviousHopData {
short_channel_id: prev_short_channel_id,
user_channel_id: Some(prev_user_channel_id),
+ channel_id: prev_channel_id,
outpoint: prev_funding_outpoint,
htlc_id: prev_htlc_id,
incoming_packet_shared_secret: incoming_shared_secret,
failed_forwards.push((HTLCSource::PreviousHopData(HTLCPreviousHopData {
short_channel_id: $htlc.prev_hop.short_channel_id,
user_channel_id: $htlc.prev_hop.user_channel_id,
+ channel_id: prev_channel_id,
outpoint: prev_funding_outpoint,
htlc_id: $htlc.prev_hop.htlc_id,
incoming_packet_shared_secret: $htlc.prev_hop.incoming_packet_shared_secret,
#[allow(unused_assignments)] {
committed_to_claimable = true;
}
- let prev_channel_id = prev_funding_outpoint.to_channel_id();
htlcs.push(claimable_htlc);
let amount_msat = htlcs.iter().map(|htlc| htlc.value).sum();
htlcs.iter_mut().for_each(|htlc| htlc.total_value_received = Some(amount_msat));
for event in background_events.drain(..) {
match event {
- BackgroundEvent::ClosedMonitorUpdateRegeneratedOnStartup((funding_txo, update)) => {
+ BackgroundEvent::ClosedMonitorUpdateRegeneratedOnStartup((funding_txo, _channel_id, update)) => {
// The channel has already been closed, so no use bothering to care about the
// monitor updating completing.
let _ = self.chain_monitor.update_channel(funding_txo, &update);
},
- BackgroundEvent::MonitorUpdateRegeneratedOnStartup { counterparty_node_id, funding_txo, update } => {
+ BackgroundEvent::MonitorUpdateRegeneratedOnStartup { counterparty_node_id, funding_txo, channel_id, update } => {
let mut updated_chan = false;
{
let per_peer_state = self.per_peer_state.read().unwrap();
if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
- match peer_state.channel_by_id.entry(funding_txo.to_channel_id()) {
+ match peer_state.channel_by_id.entry(channel_id) {
hash_map::Entry::Occupied(mut chan_phase) => {
if let ChannelPhase::Funded(chan) = chan_phase.get_mut() {
updated_chan = true;
- handle_new_monitor_update!(self, funding_txo, update.clone(),
+ handle_new_monitor_update!(self, funding_txo, channel_id, update.clone(),
peer_state_lock, peer_state, per_peer_state, chan);
} else {
debug_assert!(false, "We shouldn't have an update for a non-funded channel");
},
HTLCSource::PreviousHopData(HTLCPreviousHopData {
ref short_channel_id, ref htlc_id, ref incoming_packet_shared_secret,
- ref phantom_shared_secret, ref outpoint, ref blinded_failure, ..
+ ref phantom_shared_secret, outpoint: _, ref blinded_failure, ref channel_id, ..
}) => {
log_trace!(
- WithContext::from(&self.logger, None, Some(outpoint.to_channel_id())),
+ WithContext::from(&self.logger, None, Some(*channel_id)),
"Failing {}HTLC with payment_hash {} backwards from us: {:?}",
if blinded_failure.is_some() { "blinded " } else { "" }, &payment_hash, onion_error
);
if push_forward_ev { self.push_pending_forwards_ev(); }
let mut pending_events = self.pending_events.lock().unwrap();
pending_events.push_back((events::Event::HTLCHandlingFailed {
- prev_channel_id: outpoint.to_channel_id(),
+ prev_channel_id: *channel_id,
failed_next_destination: destination,
}, None));
},
}
if valid_mpp {
for htlc in sources.drain(..) {
- let prev_hop_chan_id = htlc.prev_hop.outpoint.to_channel_id();
+ let prev_hop_chan_id = htlc.prev_hop.channel_id;
if let Err((pk, err)) = self.claim_funds_from_hop(
htlc.prev_hop, payment_preimage,
|_, definitely_duplicate| {
{
let per_peer_state = self.per_peer_state.read().unwrap();
- let chan_id = prev_hop.outpoint.to_channel_id();
+ let chan_id = prev_hop.channel_id;
let counterparty_node_id_opt = match self.short_to_chan_info.read().unwrap().get(&prev_hop.short_channel_id) {
Some((cp_id, _dup_chan_id)) => Some(cp_id.clone()),
None => None
peer_state.monitor_update_blocked_actions.entry(chan_id).or_insert(Vec::new()).push(action);
}
if !during_init {
- handle_new_monitor_update!(self, prev_hop.outpoint, monitor_update, peer_state_lock,
+ handle_new_monitor_update!(self, prev_hop.outpoint, prev_hop.channel_id, monitor_update, peer_state_lock,
peer_state, per_peer_state, chan);
} else {
// If we're running during init we cannot update a monitor directly -
BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
counterparty_node_id,
funding_txo: prev_hop.outpoint,
+ channel_id: prev_hop.channel_id,
update: monitor_update.clone(),
});
}
log_trace!(logger, "Completing monitor update completion action for channel {} as claim was redundant: {:?}",
chan_id, action);
- let (node_id, funding_outpoint, blocker) =
+ let (node_id, _funding_outpoint, channel_id, blocker) =
if let MonitorUpdateCompletionAction::FreeOtherChannelImmediately {
downstream_counterparty_node_id: node_id,
downstream_funding_outpoint: funding_outpoint,
- blocking_action: blocker,
+ blocking_action: blocker, downstream_channel_id: channel_id,
} = action {
- (node_id, funding_outpoint, blocker)
+ (node_id, funding_outpoint, channel_id, blocker)
} else {
debug_assert!(false,
"Duplicate claims should always free another channel immediately");
let mut peer_state = peer_state_mtx.lock().unwrap();
if let Some(blockers) = peer_state
.actions_blocking_raa_monitor_updates
- .get_mut(&funding_outpoint.to_channel_id())
+ .get_mut(&channel_id)
{
let mut found_blocker = false;
blockers.retain(|iter| {
updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
payment_preimage,
}],
+ channel_id: Some(prev_hop.channel_id),
};
if !during_init {
// with a preimage we *must* somehow manage to propagate it to the upstream
// channel, or we must have an ability to receive the same event and try
// again on restart.
- log_error!(WithContext::from(&self.logger, None, Some(prev_hop.outpoint.to_channel_id())), "Critical error: failed to update channel monitor with preimage {:?}: {:?}",
+ log_error!(WithContext::from(&self.logger, None, Some(prev_hop.channel_id)),
+ "Critical error: failed to update channel monitor with preimage {:?}: {:?}",
payment_preimage, update_res);
}
} else {
// complete the monitor update completion action from `completion_action`.
self.pending_background_events.lock().unwrap().push(
BackgroundEvent::ClosedMonitorUpdateRegeneratedOnStartup((
- prev_hop.outpoint, preimage_update,
+ prev_hop.outpoint, prev_hop.channel_id, preimage_update,
)));
}
// Note that we do process the completion action here. This totally could be a
fn claim_funds_internal(&self, source: HTLCSource, payment_preimage: PaymentPreimage,
forwarded_htlc_value_msat: Option<u64>, from_onchain: bool, startup_replay: bool,
- next_channel_counterparty_node_id: Option<PublicKey>, next_channel_outpoint: OutPoint
+ next_channel_counterparty_node_id: Option<PublicKey>, next_channel_outpoint: OutPoint,
+ next_channel_id: ChannelId,
) {
match source {
HTLCSource::OutboundRoute { session_priv, payment_id, path, .. } => {
debug_assert_eq!(pubkey, path.hops[0].pubkey);
}
let ev_completion_action = EventCompletionAction::ReleaseRAAChannelMonitorUpdate {
- channel_funding_outpoint: next_channel_outpoint,
+ channel_funding_outpoint: next_channel_outpoint, channel_id: next_channel_id,
counterparty_node_id: path.hops[0].pubkey,
};
self.pending_outbound_payments.claim_htlc(payment_id, payment_preimage,
&self.logger);
},
HTLCSource::PreviousHopData(hop_data) => {
- let prev_outpoint = hop_data.outpoint;
+ let prev_channel_id = hop_data.channel_id;
let completed_blocker = RAAMonitorUpdateBlockingAction::from_prev_hop_data(&hop_data);
#[cfg(debug_assertions)]
let claiming_chan_funding_outpoint = hop_data.outpoint;
+ #[cfg(debug_assertions)]
+ let claiming_channel_id = hop_data.channel_id;
let res = self.claim_funds_from_hop(hop_data, payment_preimage,
|htlc_claim_value_msat, definitely_duplicate| {
let chan_to_release =
if let Some(node_id) = next_channel_counterparty_node_id {
- Some((node_id, next_channel_outpoint, completed_blocker))
+ Some((node_id, next_channel_outpoint, next_channel_id, completed_blocker))
} else {
// We can only get `None` here if we are processing a
// `ChannelMonitor`-originated event, in which case we
},
// or the channel we'd unblock is already closed,
BackgroundEvent::ClosedMonitorUpdateRegeneratedOnStartup(
- (funding_txo, monitor_update)
+ (funding_txo, _channel_id, monitor_update)
) => {
if *funding_txo == next_channel_outpoint {
assert_eq!(monitor_update.updates.len(), 1);
BackgroundEvent::MonitorUpdatesComplete {
channel_id, ..
} =>
- *channel_id == claiming_chan_funding_outpoint.to_channel_id(),
+ *channel_id == claiming_channel_id,
}
}), "{:?}", *background_events);
}
Some(MonitorUpdateCompletionAction::FreeOtherChannelImmediately {
downstream_counterparty_node_id: other_chan.0,
downstream_funding_outpoint: other_chan.1,
- blocking_action: other_chan.2,
+ downstream_channel_id: other_chan.2,
+ blocking_action: other_chan.3,
})
} else { None }
} else {
event: events::Event::PaymentForwarded {
fee_earned_msat,
claim_from_onchain_tx: from_onchain,
- prev_channel_id: Some(prev_outpoint.to_channel_id()),
- next_channel_id: Some(next_channel_outpoint.to_channel_id()),
+ prev_channel_id: Some(prev_channel_id),
+ next_channel_id: Some(next_channel_id),
outbound_amount_forwarded_msat: forwarded_htlc_value_msat,
},
downstream_counterparty_and_funding_outpoint: chan_to_release,
event, downstream_counterparty_and_funding_outpoint
} => {
self.pending_events.lock().unwrap().push_back((event, None));
- if let Some((node_id, funding_outpoint, blocker)) = downstream_counterparty_and_funding_outpoint {
- self.handle_monitor_update_release(node_id, funding_outpoint, Some(blocker));
+ if let Some((node_id, funding_outpoint, channel_id, blocker)) = downstream_counterparty_and_funding_outpoint {
+ self.handle_monitor_update_release(node_id, funding_outpoint, channel_id, Some(blocker));
}
},
MonitorUpdateCompletionAction::FreeOtherChannelImmediately {
- downstream_counterparty_node_id, downstream_funding_outpoint, blocking_action,
+ downstream_counterparty_node_id, downstream_funding_outpoint, downstream_channel_id, blocking_action,
} => {
self.handle_monitor_update_release(
downstream_counterparty_node_id,
downstream_funding_outpoint,
+ downstream_channel_id,
Some(blocking_action),
);
},
commitment_update: Option<msgs::CommitmentUpdate>, order: RAACommitmentOrder,
pending_forwards: Vec<(PendingHTLCInfo, u64)>, funding_broadcastable: Option<Transaction>,
channel_ready: Option<msgs::ChannelReady>, announcement_sigs: Option<msgs::AnnouncementSignatures>)
- -> Option<(u64, OutPoint, u128, Vec<(PendingHTLCInfo, u64)>)> {
+ -> Option<(u64, OutPoint, ChannelId, u128, Vec<(PendingHTLCInfo, u64)>)> {
let logger = WithChannelContext::from(&self.logger, &channel.context);
log_trace!(logger, "Handling channel resumption for channel {} with {} RAA, {} commitment update, {} pending forwards, {}broadcasting funding, {} channel ready, {} announcement",
&channel.context.channel_id(),
let counterparty_node_id = channel.context.get_counterparty_node_id();
if !pending_forwards.is_empty() {
htlc_forwards = Some((channel.context.get_short_channel_id().unwrap_or(channel.context.outbound_scid_alias()),
- channel.context.get_funding_txo().unwrap(), channel.context.get_user_id(), pending_forwards));
+ channel.context.get_funding_txo().unwrap(), channel.context.channel_id(), channel.context.get_user_id(), pending_forwards));
}
if let Some(msg) = channel_ready {
htlc_forwards
}
- fn channel_monitor_updated(&self, funding_txo: &OutPoint, highest_applied_update_id: u64, counterparty_node_id: Option<&PublicKey>) {
+ fn channel_monitor_updated(&self, funding_txo: &OutPoint, channel_id: &ChannelId, highest_applied_update_id: u64, counterparty_node_id: Option<&PublicKey>) {
debug_assert!(self.total_consistency_lock.try_write().is_err()); // Caller holds read lock
let counterparty_node_id = match counterparty_node_id {
peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
let peer_state = &mut *peer_state_lock;
let channel =
- if let Some(ChannelPhase::Funded(chan)) = peer_state.channel_by_id.get_mut(&funding_txo.to_channel_id()) {
+ if let Some(ChannelPhase::Funded(chan)) = peer_state.channel_by_id.get_mut(&channel_id) {
chan
} else {
let update_actions = peer_state.monitor_update_blocked_actions
- .remove(&funding_txo.to_channel_id()).unwrap_or(Vec::new());
+ .remove(&channel_id).unwrap_or(Vec::new());
mem::drop(peer_state_lock);
mem::drop(per_peer_state);
self.handle_monitor_update_completion_actions(update_actions);
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex = per_peer_state.get(counterparty_node_id)
.ok_or_else(|| {
- let err_str = format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id);
+ let err_str = format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id);
log_error!(logger, "{}", err_str);
- APIError::ChannelUnavailable { err: err_str }
+ APIError::ChannelUnavailable { err: err_str }
})?;
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
APIError::ChannelUnavailable { err: err_str }
})
}
- _ => {
+ _ => {
let err_str = "No such channel awaiting to be accepted.".to_owned();
log_error!(logger, "{}", err_str);
}
// Update the monitor with the shutdown script if necessary.
if let Some(monitor_update) = monitor_update_opt {
- handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update,
+ handle_new_monitor_update!(self, funding_txo_opt.unwrap(), chan.context.channel_id(), monitor_update,
peer_state_lock, peer_state, per_peer_state, chan);
}
},
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
}
};
- self.claim_funds_internal(htlc_source, msg.payment_preimage.clone(), Some(forwarded_htlc_value), false, false, Some(*counterparty_node_id), funding_txo);
+ self.claim_funds_internal(htlc_source, msg.payment_preimage.clone(), Some(forwarded_htlc_value),
+ false, false, Some(*counterparty_node_id), funding_txo, msg.channel_id);
Ok(())
}
let funding_txo = chan.context.get_funding_txo();
let monitor_update_opt = try_chan_phase_entry!(self, chan.commitment_signed(&msg, &&logger), chan_phase_entry);
if let Some(monitor_update) = monitor_update_opt {
- handle_new_monitor_update!(self, funding_txo.unwrap(), monitor_update, peer_state_lock,
+ handle_new_monitor_update!(self, funding_txo.unwrap(), chan.context.channel_id(), monitor_update, peer_state_lock,
peer_state, per_peer_state, chan);
}
Ok(())
}
#[inline]
- fn forward_htlcs(&self, per_source_pending_forwards: &mut [(u64, OutPoint, u128, Vec<(PendingHTLCInfo, u64)>)]) {
- for &mut (prev_short_channel_id, prev_funding_outpoint, prev_user_channel_id, ref mut pending_forwards) in per_source_pending_forwards {
+ fn forward_htlcs(&self, per_source_pending_forwards: &mut [(u64, OutPoint, ChannelId, u128, Vec<(PendingHTLCInfo, u64)>)]) {
+ for &mut (prev_short_channel_id, prev_funding_outpoint, prev_channel_id, prev_user_channel_id, ref mut pending_forwards) in per_source_pending_forwards {
let mut push_forward_event = false;
let mut new_intercept_events = VecDeque::new();
let mut failed_intercept_forwards = Vec::new();
match forward_htlcs.entry(scid) {
hash_map::Entry::Occupied(mut entry) => {
entry.get_mut().push(HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
- prev_short_channel_id, prev_funding_outpoint, prev_htlc_id, prev_user_channel_id, forward_info }));
+ prev_short_channel_id, prev_funding_outpoint, prev_channel_id, prev_htlc_id, prev_user_channel_id, forward_info }));
},
hash_map::Entry::Vacant(entry) => {
if !is_our_scid && forward_info.incoming_amt_msat.is_some() &&
intercept_id
}, None));
entry.insert(PendingAddHTLCInfo {
- prev_short_channel_id, prev_funding_outpoint, prev_htlc_id, prev_user_channel_id, forward_info });
+ prev_short_channel_id, prev_funding_outpoint, prev_channel_id, prev_htlc_id, prev_user_channel_id, forward_info });
},
hash_map::Entry::Occupied(_) => {
- let logger = WithContext::from(&self.logger, None, Some(prev_funding_outpoint.to_channel_id()));
+ let logger = WithContext::from(&self.logger, None, Some(prev_channel_id));
log_info!(logger, "Failed to forward incoming HTLC: detected duplicate intercepted payment over short channel id {}", scid);
let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
short_channel_id: prev_short_channel_id,
user_channel_id: Some(prev_user_channel_id),
outpoint: prev_funding_outpoint,
+ channel_id: prev_channel_id,
htlc_id: prev_htlc_id,
incoming_packet_shared_secret: forward_info.incoming_shared_secret,
phantom_shared_secret: None,
push_forward_event = true;
}
entry.insert(vec!(HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
- prev_short_channel_id, prev_funding_outpoint, prev_htlc_id, prev_user_channel_id, forward_info })));
+ prev_short_channel_id, prev_funding_outpoint, prev_channel_id, prev_htlc_id, prev_user_channel_id, forward_info })));
}
}
}
/// the [`ChannelMonitorUpdate`] in question.
fn raa_monitor_updates_held(&self,
actions_blocking_raa_monitor_updates: &BTreeMap<ChannelId, Vec<RAAMonitorUpdateBlockingAction>>,
- channel_funding_outpoint: OutPoint, counterparty_node_id: PublicKey
+ channel_funding_outpoint: OutPoint, channel_id: ChannelId, counterparty_node_id: PublicKey
) -> bool {
actions_blocking_raa_monitor_updates
- .get(&channel_funding_outpoint.to_channel_id()).map(|v| !v.is_empty()).unwrap_or(false)
+ .get(&channel_id).map(|v| !v.is_empty()).unwrap_or(false)
|| self.pending_events.lock().unwrap().iter().any(|(_, action)| {
action == &Some(EventCompletionAction::ReleaseRAAChannelMonitorUpdate {
channel_funding_outpoint,
+ channel_id,
counterparty_node_id,
})
})
if let Some(chan) = peer_state.channel_by_id.get(&channel_id) {
return self.raa_monitor_updates_held(&peer_state.actions_blocking_raa_monitor_updates,
- chan.context().get_funding_txo().unwrap(), counterparty_node_id);
+ chan.context().get_funding_txo().unwrap(), channel_id, counterparty_node_id);
}
}
false
let funding_txo_opt = chan.context.get_funding_txo();
let mon_update_blocked = if let Some(funding_txo) = funding_txo_opt {
self.raa_monitor_updates_held(
- &peer_state.actions_blocking_raa_monitor_updates, funding_txo,
+ &peer_state.actions_blocking_raa_monitor_updates, funding_txo, msg.channel_id,
*counterparty_node_id)
} else { false };
let (htlcs_to_fail, monitor_update_opt) = try_chan_phase_entry!(self,
if let Some(monitor_update) = monitor_update_opt {
let funding_txo = funding_txo_opt
.expect("Funding outpoint must have been set for RAA handling to succeed");
- handle_new_monitor_update!(self, funding_txo, monitor_update,
+ handle_new_monitor_update!(self, funding_txo, chan.context.channel_id(), monitor_update,
peer_state_lock, peer_state, per_peer_state, chan);
}
htlcs_to_fail
let mut failed_channels = Vec::new();
let mut pending_monitor_events = self.chain_monitor.release_pending_monitor_events();
let has_pending_monitor_events = !pending_monitor_events.is_empty();
- for (funding_outpoint, mut monitor_events, counterparty_node_id) in pending_monitor_events.drain(..) {
+ for (funding_outpoint, channel_id, mut monitor_events, counterparty_node_id) in pending_monitor_events.drain(..) {
for monitor_event in monitor_events.drain(..) {
match monitor_event {
MonitorEvent::HTLCEvent(htlc_update) => {
- let logger = WithContext::from(&self.logger, counterparty_node_id, Some(funding_outpoint.to_channel_id()));
+ let logger = WithContext::from(&self.logger, counterparty_node_id, Some(channel_id));
if let Some(preimage) = htlc_update.payment_preimage {
log_trace!(logger, "Claiming HTLC with preimage {} from our monitor", preimage);
- self.claim_funds_internal(htlc_update.source, preimage, htlc_update.htlc_value_satoshis.map(|v| v * 1000), true, false, counterparty_node_id, funding_outpoint);
+ self.claim_funds_internal(htlc_update.source, preimage, htlc_update.htlc_value_satoshis.map(|v| v * 1000), true, false, counterparty_node_id, funding_outpoint, channel_id);
} else {
log_trace!(logger, "Failing HTLC with hash {} from our monitor", &htlc_update.payment_hash);
- let receiver = HTLCDestination::NextHopChannel { node_id: counterparty_node_id, channel_id: funding_outpoint.to_channel_id() };
+ let receiver = HTLCDestination::NextHopChannel { node_id: counterparty_node_id, channel_id };
let reason = HTLCFailReason::from_failure_code(0x4000 | 8);
self.fail_htlc_backwards_internal(&htlc_update.source, &htlc_update.payment_hash, &reason, receiver);
}
},
- MonitorEvent::HolderForceClosed(funding_outpoint) => {
+ MonitorEvent::HolderForceClosed(_funding_outpoint) => {
let counterparty_node_id_opt = match counterparty_node_id {
Some(cp_id) => Some(cp_id),
None => {
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
let pending_msg_events = &mut peer_state.pending_msg_events;
- if let hash_map::Entry::Occupied(chan_phase_entry) = peer_state.channel_by_id.entry(funding_outpoint.to_channel_id()) {
+ if let hash_map::Entry::Occupied(chan_phase_entry) = peer_state.channel_by_id.entry(channel_id) {
if let ChannelPhase::Funded(mut chan) = remove_channel_phase!(self, chan_phase_entry) {
failed_channels.push(chan.context.force_shutdown(false, ClosureReason::HolderForceClosed));
if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
}
}
},
- MonitorEvent::Completed { funding_txo, monitor_update_id } => {
- self.channel_monitor_updated(&funding_txo, monitor_update_id, counterparty_node_id.as_ref());
+ MonitorEvent::Completed { funding_txo, channel_id, monitor_update_id } => {
+ self.channel_monitor_updated(&funding_txo, &channel_id, monitor_update_id, counterparty_node_id.as_ref());
},
}
}
if let Some(monitor_update) = monitor_opt {
has_monitor_update = true;
- handle_new_monitor_update!(self, funding_txo.unwrap(), monitor_update,
+ handle_new_monitor_update!(self, funding_txo.unwrap(), chan.context.channel_id(), monitor_update,
peer_state_lock, peer_state, per_peer_state, chan);
continue 'peer_loop;
}
// Channel::force_shutdown tries to make us do) as we may still be in initialization,
// so we track the update internally and handle it when the user next calls
// timer_tick_occurred, guaranteeing we're running normally.
- if let Some((counterparty_node_id, funding_txo, update)) = failure.monitor_update.take() {
+ if let Some((counterparty_node_id, funding_txo, channel_id, update)) = failure.monitor_update.take() {
assert_eq!(update.updates.len(), 1);
if let ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast } = update.updates[0] {
assert!(should_broadcast);
} else { unreachable!(); }
self.pending_background_events.lock().unwrap().push(
BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
- counterparty_node_id, funding_txo, update
+ counterparty_node_id, funding_txo, update, channel_id,
});
}
self.finish_close_channel(failure);
let payment_paths = self.create_blinded_payment_paths(amount_msats, payment_secret)
.map_err(|_| Bolt12SemanticError::MissingPaths)?;
- #[cfg(not(feature = "no-std"))]
+ #[cfg(feature = "std")]
let builder = refund.respond_using_derived_keys(
payment_paths, payment_hash, expanded_key, entropy
)?;
- #[cfg(feature = "no-std")]
+ #[cfg(not(feature = "std"))]
let created_at = Duration::from_secs(
self.highest_seen_timestamp.load(Ordering::Acquire) as u64
);
- #[cfg(feature = "no-std")]
+ #[cfg(not(feature = "std"))]
let builder = refund.respond_using_derived_keys_no_std(
payment_paths, payment_hash, created_at, expanded_key, entropy
)?;
/// [`Event`] being handled) completes, this should be called to restore the channel to normal
/// operation. It will double-check that nothing *else* is also blocking the same channel from
/// making progress and then let any blocked [`ChannelMonitorUpdate`]s fly.
- fn handle_monitor_update_release(&self, counterparty_node_id: PublicKey, channel_funding_outpoint: OutPoint, mut completed_blocker: Option<RAAMonitorUpdateBlockingAction>) {
+ fn handle_monitor_update_release(&self, counterparty_node_id: PublicKey,
+ channel_funding_outpoint: OutPoint, channel_id: ChannelId,
+ mut completed_blocker: Option<RAAMonitorUpdateBlockingAction>) {
+
let logger = WithContext::from(
- &self.logger, Some(counterparty_node_id), Some(channel_funding_outpoint.to_channel_id())
+ &self.logger, Some(counterparty_node_id), Some(channel_id),
);
loop {
let per_peer_state = self.per_peer_state.read().unwrap();
if let Some(blocker) = completed_blocker.take() {
// Only do this on the first iteration of the loop.
if let Some(blockers) = peer_state.actions_blocking_raa_monitor_updates
- .get_mut(&channel_funding_outpoint.to_channel_id())
+ .get_mut(&channel_id)
{
blockers.retain(|iter| iter != &blocker);
}
}
if self.raa_monitor_updates_held(&peer_state.actions_blocking_raa_monitor_updates,
- channel_funding_outpoint, counterparty_node_id) {
+ channel_funding_outpoint, channel_id, counterparty_node_id) {
// Check that, while holding the peer lock, we don't have anything else
// blocking monitor updates for this channel. If we do, release the monitor
// update(s) when those blockers complete.
log_trace!(logger, "Delaying monitor unlock for channel {} as another channel's mon update needs to complete first",
- &channel_funding_outpoint.to_channel_id());
+ &channel_id);
break;
}
- if let hash_map::Entry::Occupied(mut chan_phase_entry) = peer_state.channel_by_id.entry(channel_funding_outpoint.to_channel_id()) {
+ if let hash_map::Entry::Occupied(mut chan_phase_entry) = peer_state.channel_by_id.entry(
+ channel_id) {
if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
debug_assert_eq!(chan.context.get_funding_txo().unwrap(), channel_funding_outpoint);
if let Some((monitor_update, further_update_exists)) = chan.unblock_next_blocked_monitor_update() {
log_debug!(logger, "Unlocking monitor updating for channel {} and updating monitor",
- channel_funding_outpoint.to_channel_id());
- handle_new_monitor_update!(self, channel_funding_outpoint, monitor_update,
+ channel_id);
+ handle_new_monitor_update!(self, channel_funding_outpoint, channel_id, monitor_update,
peer_state_lck, peer_state, per_peer_state, chan);
if further_update_exists {
// If there are more `ChannelMonitorUpdate`s to process, restart at the
}
} else {
log_trace!(logger, "Unlocked monitor updating for channel {} without monitors to update",
- channel_funding_outpoint.to_channel_id());
+ channel_id);
}
}
}
for action in actions {
match action {
EventCompletionAction::ReleaseRAAChannelMonitorUpdate {
- channel_funding_outpoint, counterparty_node_id
+ channel_funding_outpoint, channel_id, counterparty_node_id
} => {
- self.handle_monitor_update_release(counterparty_node_id, channel_funding_outpoint, None);
+ self.handle_monitor_update_release(counterparty_node_id, channel_funding_outpoint, channel_id, None);
}
}
}
incoming_packet_shared_secret: htlc.forward_info.incoming_shared_secret,
phantom_shared_secret: None,
outpoint: htlc.prev_funding_outpoint,
+ channel_id: htlc.prev_channel_id,
blinded_failure: htlc.forward_info.routing.blinded_failure(),
});
HTLCFailReason::from_failure_code(0x2000 | 2),
HTLCDestination::InvalidForward { requested_forward_scid }));
let logger = WithContext::from(
- &self.logger, None, Some(htlc.prev_funding_outpoint.to_channel_id())
+ &self.logger, None, Some(htlc.prev_channel_id)
);
log_trace!(logger, "Timing out intercepted HTLC with requested forward scid {}", requested_forward_scid);
false
},
};
- #[cfg(feature = "no-std")]
+ #[cfg(not(feature = "std"))]
let created_at = Duration::from_secs(
self.highest_seen_timestamp.load(Ordering::Acquire) as u64
);
if invoice_request.keys.is_some() {
- #[cfg(not(feature = "no-std"))]
+ #[cfg(feature = "std")]
let builder = invoice_request.respond_using_derived_keys(
payment_paths, payment_hash
);
- #[cfg(feature = "no-std")]
+ #[cfg(not(feature = "std"))]
let builder = invoice_request.respond_using_derived_keys_no_std(
payment_paths, payment_hash, created_at
);
Err(error) => Some(OffersMessage::InvoiceError(error.into())),
}
} else {
- #[cfg(not(feature = "no-std"))]
+ #[cfg(feature = "std")]
let builder = invoice_request.respond_with(payment_paths, payment_hash);
- #[cfg(feature = "no-std")]
+ #[cfg(not(feature = "std"))]
let builder = invoice_request.respond_with_no_std(
payment_paths, payment_hash, created_at
);
features.set_channel_type_optional();
features.set_scid_privacy_optional();
features.set_zero_conf_optional();
+ features.set_route_blinding_optional();
if config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx {
features.set_anchors_zero_fee_htlc_tx_optional();
}
impl_writeable_tlv_based!(BlindedForward, {
(0, inbound_blinding_point, required),
+ (1, failure, (default_value, BlindedFailure::FromIntroductionNode)),
});
impl_writeable_tlv_based_enum!(PendingHTLCRouting,
(4, htlc_id, required),
(6, incoming_packet_shared_secret, required),
(7, user_channel_id, option),
+ // Note that by the time we get past the required read for type 2 above, outpoint will be
+ // filled in, so we can safely unwrap it here.
+ (9, channel_id, (default_value, ChannelId::v1_from_funding_outpoint(outpoint.0.unwrap()))),
});
impl Writeable for ClaimableHTLC {
(2, prev_short_channel_id, required),
(4, prev_htlc_id, required),
(6, prev_funding_outpoint, required),
+ // Note that by the time we get past the required read for type 6 above, prev_funding_outpoint will be
+ // filled in, so we can safely unwrap it here.
+ (7, prev_channel_id, (default_value, ChannelId::v1_from_funding_outpoint(prev_funding_outpoint.0.unwrap()))),
});
impl Writeable for HTLCForwardInfo {
let mut short_to_chan_info = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
let mut channel_closures = VecDeque::new();
let mut close_background_events = Vec::new();
+ let mut funding_txo_to_channel_id = HashMap::with_capacity(channel_count as usize);
for _ in 0..channel_count {
let mut channel: Channel<SP> = Channel::read(reader, (
&args.entropy_source, &args.signer_provider, best_block_height, &provided_channel_type_features(&args.default_config)
))?;
let logger = WithChannelContext::from(&args.logger, &channel.context);
let funding_txo = channel.context.get_funding_txo().ok_or(DecodeError::InvalidValue)?;
+ funding_txo_to_channel_id.insert(funding_txo, channel.context.channel_id());
funding_txo_set.insert(funding_txo.clone());
if let Some(ref mut monitor) = args.channel_monitors.get_mut(&funding_txo) {
if channel.get_cur_holder_commitment_transaction_number() > monitor.get_cur_holder_commitment_number() ||
if shutdown_result.unbroadcasted_batch_funding_txid.is_some() {
return Err(DecodeError::InvalidValue);
}
- if let Some((counterparty_node_id, funding_txo, update)) = shutdown_result.monitor_update {
+ if let Some((counterparty_node_id, funding_txo, channel_id, update)) = shutdown_result.monitor_update {
close_background_events.push(BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
- counterparty_node_id, funding_txo, update
+ counterparty_node_id, funding_txo, channel_id, update
});
}
failed_htlcs.append(&mut shutdown_result.dropped_outbound_htlcs);
reason: ClosureReason::OutdatedChannelManager,
counterparty_node_id: Some(channel.context.get_counterparty_node_id()),
channel_capacity_sats: Some(channel.context.get_value_satoshis()),
+ channel_funding_txo: channel.context.get_funding_txo(),
}, None));
for (channel_htlc_source, payment_hash) in channel.inflight_htlc_sources() {
let mut found_htlc = false;
reason: ClosureReason::DisconnectedPeer,
counterparty_node_id: Some(channel.context.get_counterparty_node_id()),
channel_capacity_sats: Some(channel.context.get_value_satoshis()),
+ channel_funding_txo: channel.context.get_funding_txo(),
}, None));
} else {
log_error!(logger, "Missing ChannelMonitor for channel {} needed by ChannelManager.", &channel.context.channel_id());
for (funding_txo, monitor) in args.channel_monitors.iter() {
if !funding_txo_set.contains(funding_txo) {
let logger = WithChannelMonitor::from(&args.logger, monitor);
+ let channel_id = monitor.channel_id();
log_info!(logger, "Queueing monitor update to ensure missing channel {} is force closed",
- &funding_txo.to_channel_id());
+ &channel_id);
let monitor_update = ChannelMonitorUpdate {
update_id: CLOSED_CHANNEL_UPDATE_ID,
counterparty_node_id: None,
updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast: true }],
+ channel_id: Some(monitor.channel_id()),
};
- close_background_events.push(BackgroundEvent::ClosedMonitorUpdateRegeneratedOnStartup((*funding_txo, monitor_update)));
+ close_background_events.push(BackgroundEvent::ClosedMonitorUpdateRegeneratedOnStartup((*funding_txo, channel_id, monitor_update)));
}
}
$chan_in_flight_upds.retain(|upd| upd.update_id > $monitor.get_latest_update_id());
for update in $chan_in_flight_upds.iter() {
log_trace!($logger, "Replaying ChannelMonitorUpdate {} for {}channel {}",
- update.update_id, $channel_info_log, &$funding_txo.to_channel_id());
+ update.update_id, $channel_info_log, &$monitor.channel_id());
max_in_flight_update_id = cmp::max(max_in_flight_update_id, update.update_id);
pending_background_events.push(
BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
counterparty_node_id: $counterparty_node_id,
funding_txo: $funding_txo,
+ channel_id: $monitor.channel_id(),
update: update.clone(),
});
}
pending_background_events.push(
BackgroundEvent::MonitorUpdatesComplete {
counterparty_node_id: $counterparty_node_id,
- channel_id: $funding_txo.to_channel_id(),
+ channel_id: $monitor.channel_id(),
});
}
if $peer_state.in_flight_monitor_updates.insert($funding_txo, $chan_in_flight_upds).is_some() {
if let Some(in_flight_upds) = in_flight_monitor_updates {
for ((counterparty_id, funding_txo), mut chan_in_flight_updates) in in_flight_upds {
- let logger = WithContext::from(&args.logger, Some(counterparty_id), Some(funding_txo.to_channel_id()));
+ let channel_id = funding_txo_to_channel_id.get(&funding_txo).copied();
+ let logger = WithContext::from(&args.logger, Some(counterparty_id), channel_id);
if let Some(monitor) = args.channel_monitors.get(&funding_txo) {
// Now that we've removed all the in-flight monitor updates for channels that are
// still open, we need to replay any monitor updates that are for closed channels,
funding_txo, monitor, peer_state, logger, "closed ");
} else {
log_error!(logger, "A ChannelMonitor is missing even though we have in-flight updates for it! This indicates a potentially-critical violation of the chain::Watch API!");
- log_error!(logger, " The ChannelMonitor for channel {} is missing.",
- &funding_txo.to_channel_id());
+ log_error!(logger, " The ChannelMonitor for channel {} is missing.", if let Some(channel_id) =
+ channel_id { channel_id.to_string() } else { format!("with outpoint {}", funding_txo) } );
log_error!(logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,");
log_error!(logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!");
log_error!(logger, " Without the latest ChannelMonitor we cannot continue without risking funds.");
if let HTLCForwardInfo::AddHTLC(htlc_info) = forward {
if pending_forward_matches_htlc(&htlc_info) {
log_info!(logger, "Removing pending to-forward HTLC with hash {} as it was forwarded to the closed channel {}",
- &htlc.payment_hash, &monitor.get_funding_txo().0.to_channel_id());
+ &htlc.payment_hash, &monitor.channel_id());
false
} else { true }
} else { true }
pending_intercepted_htlcs.as_mut().unwrap().retain(|intercepted_id, htlc_info| {
if pending_forward_matches_htlc(&htlc_info) {
log_info!(logger, "Removing pending intercepted HTLC with hash {} as it was forwarded to the closed channel {}",
- &htlc.payment_hash, &monitor.get_funding_txo().0.to_channel_id());
+ &htlc.payment_hash, &monitor.channel_id());
pending_events_read.retain(|(event, _)| {
if let Event::HTLCIntercepted { intercept_id: ev_id, .. } = event {
intercepted_id != ev_id
let compl_action =
EventCompletionAction::ReleaseRAAChannelMonitorUpdate {
channel_funding_outpoint: monitor.get_funding_txo().0,
+ channel_id: monitor.channel_id(),
counterparty_node_id: path.hops[0].pubkey,
};
pending_outbounds.claim_htlc(payment_id, preimage, session_priv,
// channel_id -> peer map entry).
counterparty_opt.is_none(),
counterparty_opt.cloned().or(monitor.get_counterparty_node_id()),
- monitor.get_funding_txo().0))
+ monitor.get_funding_txo().0, monitor.channel_id()))
} else { None }
} else {
// If it was an outbound payment, we've handled it above - if a preimage
// this channel as well. On the flip side, there's no harm in restarting
// without the new monitor persisted - we'll end up right back here on
// restart.
- let previous_channel_id = claimable_htlc.prev_hop.outpoint.to_channel_id();
+ let previous_channel_id = claimable_htlc.prev_hop.channel_id;
if let Some(peer_node_id) = outpoint_to_peer.get(&claimable_htlc.prev_hop.outpoint) {
let peer_state_mutex = per_peer_state.get(peer_node_id).unwrap();
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
for action in actions.iter() {
if let MonitorUpdateCompletionAction::EmitEventAndFreeOtherChannel {
downstream_counterparty_and_funding_outpoint:
- Some((blocked_node_id, blocked_channel_outpoint, blocking_action)), ..
+ Some((blocked_node_id, _blocked_channel_outpoint, blocked_channel_id, blocking_action)), ..
} = action {
if let Some(blocked_peer_state) = per_peer_state.get(&blocked_node_id) {
+ let channel_id = blocked_channel_id;
log_trace!(logger,
"Holding the next revoke_and_ack from {} until the preimage is durably persisted in the inbound edge's ChannelMonitor",
- blocked_channel_outpoint.to_channel_id());
+ channel_id);
blocked_peer_state.lock().unwrap().actions_blocking_raa_monitor_updates
- .entry(blocked_channel_outpoint.to_channel_id())
+ .entry(*channel_id)
.or_insert_with(Vec::new).push(blocking_action.clone());
} else {
// If the channel we were blocking has closed, we don't need to
channel_manager.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver);
}
- for (source, preimage, downstream_value, downstream_closed, downstream_node_id, downstream_funding) in pending_claims_to_replay {
+ for (source, preimage, downstream_value, downstream_closed, downstream_node_id, downstream_funding, downstream_channel_id) in pending_claims_to_replay {
// We use `downstream_closed` in place of `from_onchain` here just as a guess - we
// don't remember in the `ChannelMonitor` where we got a preimage from, but if the
// channel is closed we just assume that it probably came from an on-chain claim.
channel_manager.claim_funds_internal(source, preimage, Some(downstream_value),
- downstream_closed, true, downstream_node_id, downstream_funding);
+ downstream_closed, true, downstream_node_id, downstream_funding, downstream_channel_id);
}
//TODO: Broadcast channel update for closed channels, but only after we've made a
let sender_intended_amt_msat = 100;
let extra_fee_msat = 10;
let hop_data = msgs::InboundOnionPayload::Receive {
- amt_msat: 100,
- outgoing_cltv_value: 42,
+ sender_intended_htlc_amt_msat: 100,
+ cltv_expiry_height: 42,
payment_metadata: None,
keysend_preimage: None,
payment_data: Some(msgs::FinalOnionHopData {
// Check that if the amount we received + the penultimate hop extra fee is less than the sender
// intended amount, we fail the payment.
let current_height: u32 = node[0].node.best_block.read().unwrap().height();
- if let Err(crate::ln::channelmanager::InboundOnionErr { err_code, .. }) =
+ if let Err(crate::ln::channelmanager::InboundHTLCErr { err_code, .. }) =
create_recv_pending_htlc_info(hop_data, [0; 32], PaymentHash([0; 32]),
sender_intended_amt_msat - extra_fee_msat - 1, 42, None, true, Some(extra_fee_msat),
current_height, node[0].node.default_configuration.accept_mpp_keysend)
// If amt_received + extra_fee is equal to the sender intended amount, we're fine.
let hop_data = msgs::InboundOnionPayload::Receive { // This is the same payload as above, InboundOnionPayload doesn't implement Clone
- amt_msat: 100,
- outgoing_cltv_value: 42,
+ sender_intended_htlc_amt_msat: 100,
+ cltv_expiry_height: 42,
payment_metadata: None,
keysend_preimage: None,
payment_data: Some(msgs::FinalOnionHopData {
let current_height: u32 = node[0].node.best_block.read().unwrap().height();
let result = create_recv_pending_htlc_info(msgs::InboundOnionPayload::Receive {
- amt_msat: 100,
- outgoing_cltv_value: 22,
+ sender_intended_htlc_amt_msat: 100,
+ cltv_expiry_height: 22,
payment_metadata: None,
keysend_preimage: None,
payment_data: Some(msgs::FinalOnionHopData {
use bitcoin::blockdata::locktime::absolute::LockTime;
use bitcoin::hashes::Hash;
use bitcoin::hashes::sha256::Hash as Sha256;
- use bitcoin::{Block, Transaction, TxOut};
+ use bitcoin::{Transaction, TxOut};
use crate::sync::{Arc, Mutex, RwLock};
let fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) };
let logger_a = test_utils::TestLogger::with_id("node a".to_owned());
let scorer = RwLock::new(test_utils::TestScorer::new());
- let router = test_utils::TestRouter::new(Arc::new(NetworkGraph::new(network, &logger_a)), &scorer);
+ let router = test_utils::TestRouter::new(Arc::new(NetworkGraph::new(network, &logger_a)), &logger_a, &scorer);
let mut config: UserConfig = Default::default();
config.channel_config.max_dust_htlc_exposure = MaxDustHTLCExposure::FeeRateMultiplier(5_000_000 / 253);
}
}
+impl<T: sealed::RouteBlinding> Features<T> {
+ #[cfg(test)]
+ pub(crate) fn clear_route_blinding(&mut self) {
+ <T as sealed::RouteBlinding>::clear_bits(&mut self.flags);
+ }
+}
+
#[cfg(test)]
impl<T: sealed::UnknownFeature> Features<T> {
pub(crate) fn unknown() -> Self {
//! nodes for functional tests.
use crate::chain::{BestBlock, ChannelMonitorUpdateStatus, Confirm, Listen, Watch, chainmonitor::Persist};
-use crate::sign::EntropySource;
use crate::chain::channelmonitor::ChannelMonitor;
use crate::chain::transaction::OutPoint;
use crate::events::{ClaimedHTLC, ClosureReason, Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider, PathFailure, PaymentPurpose, PaymentFailureReason};
use crate::events::bump_transaction::{BumpTransactionEvent, BumpTransactionEventHandler, Wallet, WalletSource};
use crate::ln::{ChannelId, PaymentPreimage, PaymentHash, PaymentSecret};
use crate::ln::channelmanager::{AChannelManager, ChainParameters, ChannelManager, ChannelManagerReadArgs, RAACommitmentOrder, PaymentSendFailure, RecipientOnionFields, PaymentId, MIN_CLTV_EXPIRY_DELTA};
-use crate::routing::gossip::{P2PGossipSync, NetworkGraph, NetworkUpdate};
-use crate::routing::router::{self, PaymentParameters, Route, RouteParameters};
use crate::ln::features::InitFeatures;
use crate::ln::msgs;
-use crate::ln::msgs::{ChannelMessageHandler,RoutingMessageHandler};
-use crate::util::test_channel_signer::TestChannelSigner;
+use crate::ln::msgs::{ChannelMessageHandler, OnionMessageHandler, RoutingMessageHandler};
+use crate::ln::peer_handler::IgnoringMessageHandler;
+use crate::onion_message::messenger::OnionMessenger;
+use crate::routing::gossip::{P2PGossipSync, NetworkGraph, NetworkUpdate};
+use crate::routing::router::{self, PaymentParameters, Route, RouteParameters};
+use crate::sign::{EntropySource, RandomBytes};
+use crate::util::config::{UserConfig, MaxDustHTLCExposure};
+use crate::util::errors::APIError;
+#[cfg(test)]
+use crate::util::logger::Logger;
use crate::util::scid_utils;
+use crate::util::test_channel_signer::TestChannelSigner;
use crate::util::test_utils;
use crate::util::test_utils::{panicking, TestChainMonitor, TestScorer, TestKeysInterface};
-use crate::util::errors::APIError;
-use crate::util::config::{UserConfig, MaxDustHTLCExposure};
use crate::util::ser::{ReadableArgs, Writeable};
-#[cfg(test)]
-use crate::util::logger::Logger;
use bitcoin::blockdata::block::{Block, Header, Version};
use bitcoin::blockdata::locktime::absolute::LockTime;
use bitcoin::pow::CompactTarget;
use bitcoin::secp256k1::{PublicKey, SecretKey};
+use alloc::rc::Rc;
+use core::cell::RefCell;
+use core::iter::repeat;
+use core::mem;
+use core::ops::Deref;
use crate::io;
use crate::prelude::*;
-use core::cell::RefCell;
-use alloc::rc::Rc;
use crate::sync::{Arc, Mutex, LockTestExt, RwLock};
-use core::mem;
-use core::iter::repeat;
pub const CHAN_CONFIRM_DEPTH: u32 = 10;
fn call_claimable_balances<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>) {
// Ensure `get_claimable_balances`' self-tests never panic
- for funding_outpoint in node.chain_monitor.chain_monitor.list_monitors() {
+ for (funding_outpoint, _channel_id) in node.chain_monitor.chain_monitor.list_monitors() {
node.chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances();
}
}
pub tx_broadcaster: &'a test_utils::TestBroadcaster,
pub fee_estimator: &'a test_utils::TestFeeEstimator,
pub router: test_utils::TestRouter<'a>,
+ pub message_router: test_utils::TestMessageRouter<'a>,
pub chain_monitor: test_utils::TestChainMonitor<'a>,
pub keys_manager: &'a test_utils::TestKeysInterface,
pub logger: &'a test_utils::TestLogger,
&'chan_mon_cfg test_utils::TestLogger,
>;
+type TestOnionMessenger<'chan_man, 'node_cfg, 'chan_mon_cfg> = OnionMessenger<
+ DedicatedEntropy,
+ &'node_cfg test_utils::TestKeysInterface,
+ &'chan_mon_cfg test_utils::TestLogger,
+ &'node_cfg test_utils::TestMessageRouter<'chan_mon_cfg>,
+ &'chan_man TestChannelManager<'node_cfg, 'chan_mon_cfg>,
+ IgnoringMessageHandler,
+>;
+
+/// For use with [`OnionMessenger`], as otherwise `test_restored_packages_retry` will fail. This is
+/// because that test uses older serialized data produced by calling [`EntropySource`] in a specific
+/// manner. Using the same [`EntropySource`] with [`OnionMessenger`] would introduce another call,
+/// causing the produced data to no longer match.
+pub struct DedicatedEntropy(RandomBytes);
+
+impl Deref for DedicatedEntropy {
+ type Target = RandomBytes;
+ fn deref(&self) -> &Self::Target { &self.0 }
+}
+
pub struct Node<'chan_man, 'node_cfg: 'chan_man, 'chan_mon_cfg: 'node_cfg> {
pub chain_source: &'chan_mon_cfg test_utils::TestChainSource,
pub tx_broadcaster: &'chan_mon_cfg test_utils::TestBroadcaster,
pub chain_monitor: &'node_cfg test_utils::TestChainMonitor<'chan_mon_cfg>,
pub keys_manager: &'chan_mon_cfg test_utils::TestKeysInterface,
pub node: &'chan_man TestChannelManager<'node_cfg, 'chan_mon_cfg>,
+ pub onion_messenger: TestOnionMessenger<'chan_man, 'node_cfg, 'chan_mon_cfg>,
pub network_graph: &'node_cfg NetworkGraph<&'chan_mon_cfg test_utils::TestLogger>,
pub gossip_sync: P2PGossipSync<&'node_cfg NetworkGraph<&'chan_mon_cfg test_utils::TestLogger>, &'chan_mon_cfg test_utils::TestChainSource, &'chan_mon_cfg test_utils::TestLogger>,
pub node_seed: [u8; 32],
&'chan_mon_cfg test_utils::TestLogger,
>,
}
+
+impl<'a, 'b, 'c> Node<'a, 'b, 'c> {
+ pub fn init_features(&self, peer_node_id: &PublicKey) -> InitFeatures {
+ self.override_init_features.borrow().clone()
+ .unwrap_or_else(|| self.node.init_features() | self.onion_messenger.provided_init_features(peer_node_id))
+ }
+}
+
#[cfg(feature = "std")]
impl<'a, 'b, 'c> std::panic::UnwindSafe for Node<'a, 'b, 'c> {}
#[cfg(feature = "std")]
let feeest = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) };
let mut deserialized_monitors = Vec::new();
{
- for outpoint in self.chain_monitor.chain_monitor.list_monitors() {
+ for (outpoint, _channel_id) in self.chain_monitor.chain_monitor.list_monitors() {
let mut w = test_utils::TestVecWriter(Vec::new());
self.chain_monitor.chain_monitor.get_monitor(outpoint).unwrap().write(&mut w).unwrap();
let (_, deserialized_monitor) = <(BlockHash, ChannelMonitor<TestChannelSigner>)>::read(
node_signer: self.keys_manager,
signer_provider: self.keys_manager,
fee_estimator: &test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) },
- router: &test_utils::TestRouter::new(Arc::new(network_graph), &scorer),
+ router: &test_utils::TestRouter::new(Arc::new(network_graph), &self.logger, &scorer),
chain_monitor: self.chain_monitor,
tx_broadcaster: &broadcaster,
logger: &self.logger,
let chain_source = test_utils::TestChainSource::new(Network::Testnet);
let chain_monitor = test_utils::TestChainMonitor::new(Some(&chain_source), &broadcaster, &self.logger, &feeest, &persister, &self.keys_manager);
for deserialized_monitor in deserialized_monitors.drain(..) {
- if chain_monitor.watch_channel(deserialized_monitor.get_funding_txo().0, deserialized_monitor) != Ok(ChannelMonitorUpdateStatus::Completed) {
+ let funding_outpoint = deserialized_monitor.get_funding_txo().0;
+ if chain_monitor.watch_channel(funding_outpoint, deserialized_monitor) != Ok(ChannelMonitorUpdateStatus::Completed) {
panic!();
}
}
assert!(node_read.is_empty());
for monitor in monitors_read.drain(..) {
- assert_eq!(node.chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor),
+ let funding_outpoint = monitor.get_funding_txo().0;
+ assert_eq!(node.chain_monitor.watch_channel(funding_outpoint, monitor),
Ok(ChannelMonitorUpdateStatus::Completed));
check_added_monitors!(node, 1);
}
$new_channelmanager = _reload_node(&$node, $new_config, &chanman_encoded, $monitors_encoded);
$node.node = &$new_channelmanager;
+ $node.onion_messenger.set_offers_handler(&$new_channelmanager);
};
($node: expr, $chanman_encoded: expr, $monitors_encoded: expr, $persister: ident, $new_chain_monitor: ident, $new_channelmanager: ident) => {
reload_node!($node, $crate::util::config::UserConfig::default(), $chanman_encoded, $monitors_encoded, $persister, $new_chain_monitor, $new_channelmanager);
pub counterparty_node_id: Option<PublicKey>,
pub discard_funding: bool,
pub reason: Option<ClosureReason>,
+ pub channel_funding_txo: Option<OutPoint>,
+ pub user_channel_id: Option<u128>,
}
impl ExpectedCloseEvent {
counterparty_node_id: None,
discard_funding,
reason: Some(reason),
+ channel_funding_txo: None,
+ user_channel_id: None,
}
}
}
reason,
counterparty_node_id,
channel_capacity_sats,
+ channel_funding_txo,
+ user_channel_id,
..
} if (
expected_event.channel_id.map(|expected| *channel_id == expected).unwrap_or(true) &&
expected_event.reason.as_ref().map(|expected| reason == expected).unwrap_or(true) &&
- expected_event.counterparty_node_id.map(|expected| *counterparty_node_id == Some(expected)).unwrap_or(true) &&
- expected_event.channel_capacity_sats.map(|expected| *channel_capacity_sats == Some(expected)).unwrap_or(true)
+ expected_event.
+ counterparty_node_id.map(|expected| *counterparty_node_id == Some(expected)).unwrap_or(true) &&
+ expected_event.channel_capacity_sats
+ .map(|expected| *channel_capacity_sats == Some(expected)).unwrap_or(true) &&
+ expected_event.channel_funding_txo
+ .map(|expected| *channel_funding_txo == Some(expected)).unwrap_or(true) &&
+ expected_event.user_channel_id
+ .map(|expected| *user_channel_id == expected).unwrap_or(true)
)
)));
}
counterparty_node_id: Some(*node_id),
discard_funding: is_check_discard_funding,
reason: Some(expected_reason.clone()),
+ channel_funding_txo: None,
+ user_channel_id: None,
}).collect::<Vec<_>>();
check_closed_events(node, expected_close_events.as_slice());
}
logger: &chanmon_cfgs[i].logger,
tx_broadcaster: &chanmon_cfgs[i].tx_broadcaster,
fee_estimator: &chanmon_cfgs[i].fee_estimator,
- router: test_utils::TestRouter::new(network_graph.clone(), &chanmon_cfgs[i].scorer),
+ router: test_utils::TestRouter::new(network_graph.clone(), &chanmon_cfgs[i].logger, &chanmon_cfgs[i].scorer),
+ message_router: test_utils::TestMessageRouter::new(network_graph.clone()),
chain_monitor,
keys_manager: &chanmon_cfgs[i].keys_manager,
node_seed: seed,
let connect_style = Rc::new(RefCell::new(ConnectStyle::random_style()));
for i in 0..node_count {
+ let dedicated_entropy = DedicatedEntropy(RandomBytes::new([i as u8; 32]));
+ let onion_messenger = OnionMessenger::new(
+ dedicated_entropy, cfgs[i].keys_manager, cfgs[i].logger, &cfgs[i].message_router,
+ &chan_mgrs[i], IgnoringMessageHandler {},
+ );
let gossip_sync = P2PGossipSync::new(cfgs[i].network_graph.as_ref(), None, cfgs[i].logger);
let wallet_source = Arc::new(test_utils::TestWalletSource::new(SecretKey::from_slice(&[i as u8 + 1; 32]).unwrap()));
nodes.push(Node{
fee_estimator: cfgs[i].fee_estimator, router: &cfgs[i].router,
chain_monitor: &cfgs[i].chain_monitor, keys_manager: &cfgs[i].keys_manager,
node: &chan_mgrs[i], network_graph: cfgs[i].network_graph.as_ref(), gossip_sync,
- node_seed: cfgs[i].node_seed, network_chan_count: chan_count.clone(),
+ node_seed: cfgs[i].node_seed, onion_messenger, network_chan_count: chan_count.clone(),
network_payment_count: payment_count.clone(), logger: cfgs[i].logger,
blocks: Arc::clone(&cfgs[i].tx_broadcaster.blocks),
connect_style: Rc::clone(&connect_style),
for i in 0..node_count {
for j in (i+1)..node_count {
- nodes[i].node.peer_connected(&nodes[j].node.get_our_node_id(), &msgs::Init {
- features: nodes[j].override_init_features.borrow().clone().unwrap_or_else(|| nodes[j].node.init_features()),
+ let node_id_i = nodes[i].node.get_our_node_id();
+ let node_id_j = nodes[j].node.get_our_node_id();
+
+ let init_i = msgs::Init {
+ features: nodes[i].init_features(&node_id_j),
networks: None,
remote_network_address: None,
- }, true).unwrap();
- nodes[j].node.peer_connected(&nodes[i].node.get_our_node_id(), &msgs::Init {
- features: nodes[i].override_init_features.borrow().clone().unwrap_or_else(|| nodes[i].node.init_features()),
+ };
+ let init_j = msgs::Init {
+ features: nodes[j].init_features(&node_id_i),
networks: None,
remote_network_address: None,
- }, false).unwrap();
+ };
+
+ nodes[i].node.peer_connected(&node_id_j, &init_j, true).unwrap();
+ nodes[j].node.peer_connected(&node_id_i, &init_i, false).unwrap();
+ nodes[i].onion_messenger.peer_connected(&node_id_j, &init_j, true).unwrap();
+ nodes[j].onion_messenger.peer_connected(&node_id_i, &init_i, false).unwrap();
}
}
let chain_monitor = test_utils::TestChainMonitor::new(Some(&chanmon_cfgs[0].chain_source), &chanmon_cfgs[0].tx_broadcaster, &chanmon_cfgs[0].logger, &chanmon_cfgs[0].fee_estimator, &chanmon_cfgs[0].persister, &keys_manager);
let network_graph = Arc::new(NetworkGraph::new(Network::Testnet, &chanmon_cfgs[0].logger));
let scorer = RwLock::new(test_utils::TestScorer::new());
- let router = test_utils::TestRouter::new(network_graph.clone(), &scorer);
- let node = NodeCfg { chain_source: &chanmon_cfgs[0].chain_source, logger: &chanmon_cfgs[0].logger, tx_broadcaster: &chanmon_cfgs[0].tx_broadcaster, fee_estimator: &chanmon_cfgs[0].fee_estimator, router, chain_monitor, keys_manager: &keys_manager, network_graph, node_seed: seed, override_init_features: alloc::rc::Rc::new(core::cell::RefCell::new(None)) };
+ let router = test_utils::TestRouter::new(network_graph.clone(), &chanmon_cfgs[0].logger, &scorer);
+ let message_router = test_utils::TestMessageRouter::new(network_graph.clone());
+ let node = NodeCfg { chain_source: &chanmon_cfgs[0].chain_source, logger: &chanmon_cfgs[0].logger, tx_broadcaster: &chanmon_cfgs[0].tx_broadcaster, fee_estimator: &chanmon_cfgs[0].fee_estimator, router, message_router, chain_monitor, keys_manager: &keys_manager, network_graph, node_seed: seed, override_init_features: alloc::rc::Rc::new(core::cell::RefCell::new(None)) };
let mut node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
node_cfgs.remove(0);
node_cfgs.insert(0, node);
RecipientOnionFields::secret_only(our_payment_secret), height + 1, &None).unwrap();
// Edit amt_to_forward to simulate the sender having set
// the final amount and the routing node taking less fee
- if let msgs::OutboundOnionPayload::Receive { ref mut amt_msat, .. } = onion_payloads[1] {
- *amt_msat = 99_000;
+ if let msgs::OutboundOnionPayload::Receive {
+ ref mut sender_intended_htlc_amt_msat, ..
+ } = onion_payloads[1] {
+ *sender_intended_htlc_amt_msat = 99_000;
} else { panic!() }
let new_onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &our_payment_hash).unwrap();
payment_event.msgs[0].onion_routing_packet = new_onion_packet;
check_added_monitors!(nodes[0], 0);
let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
- let channel_id = crate::chain::transaction::OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index }.to_channel_id();
+ let channel_id = ChannelId::v1_from_funding_outpoint(crate::chain::transaction::OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index });
nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id, data: "Hi".to_owned() });
assert!(nodes[0].chain_monitor.added_monitors.lock().unwrap().is_empty());
check_closed_event!(nodes[0], 2, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString("Hi".to_string()) }, true,
check_added_monitors!(nodes[1], 1);
expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
let reason = ClosureReason::ProcessingError { err: format!("An existing channel using outpoint {} is open with peer {}", funding_output, nodes[2].node.get_our_node_id()), };
- check_closed_events(&nodes[0], &[ExpectedCloseEvent::from_id_reason(funding_output.to_channel_id(), true, reason)]);
+ check_closed_events(&nodes[0], &[ExpectedCloseEvent::from_id_reason(ChannelId::v1_from_funding_outpoint(funding_output), true, reason)]);
let funding_signed = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed);
let (_, _, _, real_channel_id, funding_tx) = create_chan_between_nodes(&nodes[0], &nodes[1]);
let real_chan_funding_txo = chain::transaction::OutPoint { txid: funding_tx.txid(), index: 0 };
- assert_eq!(real_chan_funding_txo.to_channel_id(), real_channel_id);
+ assert_eq!(ChannelId::v1_from_funding_outpoint(real_chan_funding_txo), real_channel_id);
nodes[2].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None, None).unwrap();
let mut open_chan_msg = get_event_msg!(nodes[2], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
let funding_signed_msg = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
let funding_outpoint = crate::chain::transaction::OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index };
- let channel_id = funding_outpoint.to_channel_id();
+ let channel_id = ChannelId::v1_from_funding_outpoint(funding_outpoint);
// Now we have the first channel past funding_created (ie it has a txid-based channel_id, not a
// temporary one).
// Complete the persistence of the monitor.
nodes[0].chain_monitor.complete_sole_pending_chan_update(
- &OutPoint { txid: tx.txid(), index: 1 }.to_channel_id()
+ &ChannelId::v1_from_funding_outpoint(OutPoint { txid: tx.txid(), index: 1 })
);
let events = nodes[0].node.get_and_clear_pending_events();
nodes[0].node.peer_disconnected(&nodes[2].node.get_our_node_id());
// The channels in the batch will close immediately.
- let channel_id_1 = OutPoint { txid: tx.txid(), index: 0 }.to_channel_id();
- let channel_id_2 = OutPoint { txid: tx.txid(), index: 1 }.to_channel_id();
+ let funding_txo_1 = OutPoint { txid: tx.txid(), index: 0 };
+ let funding_txo_2 = OutPoint { txid: tx.txid(), index: 1 };
+ let channel_id_1 = ChannelId::v1_from_funding_outpoint(funding_txo_1);
+ let channel_id_2 = ChannelId::v1_from_funding_outpoint(funding_txo_2);
check_closed_events(&nodes[0], &[
ExpectedCloseEvent {
channel_id: Some(channel_id_1),
discard_funding: true,
+ channel_funding_txo: Some(funding_txo_1),
+ user_channel_id: Some(42),
..Default::default()
},
ExpectedCloseEvent {
channel_id: Some(channel_id_2),
discard_funding: true,
+ channel_funding_txo: Some(funding_txo_2),
+ user_channel_id: Some(43),
..Default::default()
},
]);
assert_eq!(nodes[0].tx_broadcaster.txn_broadcast().len(), 0);
// Force-close the channel for which we've completed the initial monitor.
- let channel_id_1 = OutPoint { txid: tx.txid(), index: 0 }.to_channel_id();
- let channel_id_2 = OutPoint { txid: tx.txid(), index: 1 }.to_channel_id();
+ let funding_txo_1 = OutPoint { txid: tx.txid(), index: 0 };
+ let funding_txo_2 = OutPoint { txid: tx.txid(), index: 1 };
+ let channel_id_1 = ChannelId::v1_from_funding_outpoint(funding_txo_1);
+ let channel_id_2 = ChannelId::v1_from_funding_outpoint(funding_txo_2);
nodes[0].node.force_close_broadcasting_latest_txn(&channel_id_1, &nodes[1].node.get_our_node_id()).unwrap();
check_added_monitors(&nodes[0], 2);
{
ExpectedCloseEvent {
channel_id: Some(channel_id_1),
discard_funding: true,
+ channel_funding_txo: Some(funding_txo_1),
+ user_channel_id: Some(42),
..Default::default()
},
ExpectedCloseEvent {
channel_id: Some(channel_id_2),
discard_funding: true,
+ channel_funding_txo: Some(funding_txo_2),
+ user_channel_id: Some(43),
..Default::default()
},
]);
let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
let funding_tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 1_000_000, 0);
- let chan_id = chain::transaction::OutPoint { txid: funding_tx.txid(), index: 0 }.to_channel_id();
+ let chan_id = ChannelId::v1_from_funding_outpoint(chain::transaction::OutPoint { txid: funding_tx.txid(), index: 0 });
assert_eq!(nodes[0].node.list_channels().len(), 1);
assert_eq!(nodes[1].node.list_channels().len(), 1);
use crate::ln::{PaymentHash, PaymentPreimage, PaymentSecret};
use crate::ln::msgs;
use crate::ln::msgs::MAX_VALUE_MSAT;
-use crate::util::chacha20::ChaCha20;
-use crate::util::crypto::hkdf_extract_expand_5x;
+use crate::crypto::chacha20::ChaCha20;
+use crate::crypto::utils::hkdf_extract_expand_5x;
use crate::util::errors::APIError;
use crate::util::logger::Logger;
#[cfg(all(test, async_signing))]
#[allow(unused_mut)]
mod async_signer_tests;
+#[cfg(test)]
+#[allow(unused_mut)]
+mod offers_tests;
pub use self::peer_channel_encryptor::LN_MAX_MSG_LEN;
use crate::chain::chaininterface::{LowerBoundedFeeEstimator, compute_feerate_sat_per_1000_weight};
use crate::events::bump_transaction::{BumpTransactionEvent, WalletSource};
use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider, ClosureReason, HTLCDestination};
-use crate::ln::channel;
+use crate::ln::{channel, ChannelId};
use crate::ln::channelmanager::{BREAKDOWN_TIMEOUT, PaymentId, RecipientOnionFields};
use crate::ln::msgs::ChannelMessageHandler;
use crate::util::config::UserConfig;
-use crate::util::crypto::sign;
+use crate::crypto::utils::sign;
use crate::util::ser::Writeable;
use crate::util::scid_utils::block_from_scid;
use crate::util::test_utils;
let (_, _, chan_id, funding_tx) =
create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 1_000_000);
let funding_outpoint = OutPoint { txid: funding_tx.txid(), index: 0 };
- assert_eq!(funding_outpoint.to_channel_id(), chan_id);
+ assert_eq!(ChannelId::v1_from_funding_outpoint(funding_outpoint), chan_id);
let chan_feerate = get_feerate!(nodes[0], nodes[1], chan_id) as u64;
let channel_type_features = get_channel_type_features!(nodes[0], nodes[1], chan_id);
let (_, _, chan_id, funding_tx) =
create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 1_000_000);
let funding_outpoint = OutPoint { txid: funding_tx.txid(), index: 0 };
- assert_eq!(funding_outpoint.to_channel_id(), chan_id);
+ assert_eq!(ChannelId::v1_from_funding_outpoint(funding_outpoint), chan_id);
// This HTLC is immediately claimed, giving node B the preimage
let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 3_000_000);
let (_, _, chan_id, funding_tx) =
create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 100_000_000);
let funding_outpoint = OutPoint { txid: funding_tx.txid(), index: 0 };
- assert_eq!(funding_outpoint.to_channel_id(), chan_id);
+ assert_eq!(ChannelId::v1_from_funding_outpoint(funding_outpoint), chan_id);
// We create five HTLCs for B to claim against A's revoked commitment transaction:
//
let (_, _, chan_id, funding_tx) =
create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 12_000_000);
let funding_outpoint = OutPoint { txid: funding_tx.txid(), index: 0 };
- assert_eq!(funding_outpoint.to_channel_id(), chan_id);
+ assert_eq!(ChannelId::v1_from_funding_outpoint(funding_outpoint), chan_id);
let payment_preimage = route_payment(&nodes[0], &[&nodes[1]], 3_000_000).0;
let failed_payment_hash = route_payment(&nodes[1], &[&nodes[0]], 1_000_000).1;
let (_, _, chan_id, funding_tx) =
create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 100_000_000);
let funding_outpoint = OutPoint { txid: funding_tx.txid(), index: 0 };
- assert_eq!(funding_outpoint.to_channel_id(), chan_id);
+ assert_eq!(ChannelId::v1_from_funding_outpoint(funding_outpoint), chan_id);
// We create two HTLCs, one which we will give A the preimage to to generate an HTLC-Success
// transaction, and one which we will not, allowing B to claim the HTLC output in an aggregated
use crate::io_extras::read_to_end;
use crate::events::{EventsProvider, MessageSendEventsProvider};
-use crate::util::chacha20poly1305rfc::ChaChaPolyReadAdapter;
+use crate::crypto::streams::ChaChaPolyReadAdapter;
use crate::util::logger;
use crate::util::ser::{LengthReadable, LengthReadableArgs, Readable, ReadableArgs, Writeable, Writer, WithoutLength, FixedLengthReader, HighZeroBytesDroppedBigSize, Hostname, TransactionU16LenLimited, BigSize};
use crate::util::base32;
/// Used in decrypting the onion packet's payload.
pub blinding_point: PublicKey,
/// The full onion packet including hop data, pubkey, and hmac
- pub onion_routing_packet: onion_message::Packet,
+ pub onion_routing_packet: onion_message::packet::Packet,
}
/// An [`update_fulfill_htlc`] message to be sent to or received from a peer.
payment_metadata: Option<Vec<u8>>,
keysend_preimage: Option<PaymentPreimage>,
custom_tlvs: Vec<(u64, Vec<u8>)>,
- amt_msat: u64,
- outgoing_cltv_value: u32,
+ sender_intended_htlc_amt_msat: u64,
+ cltv_expiry_height: u32,
},
BlindedForward {
short_channel_id: u64,
payment_relay: PaymentRelay,
payment_constraints: PaymentConstraints,
features: BlindedHopFeatures,
- intro_node_blinding_point: PublicKey,
+ intro_node_blinding_point: Option<PublicKey>,
},
BlindedReceive {
- amt_msat: u64,
+ sender_intended_htlc_amt_msat: u64,
total_msat: u64,
- outgoing_cltv_value: u32,
+ cltv_expiry_height: u32,
payment_secret: PaymentSecret,
payment_constraints: PaymentConstraints,
intro_node_blinding_point: Option<PublicKey>,
payment_metadata: Option<Vec<u8>>,
keysend_preimage: Option<PaymentPreimage>,
custom_tlvs: Vec<(u64, Vec<u8>)>,
- amt_msat: u64,
- outgoing_cltv_value: u32,
+ sender_intended_htlc_amt_msat: u64,
+ cltv_expiry_height: u32,
},
BlindedForward {
encrypted_tlvs: Vec<u8>,
intro_node_blinding_point: Option<PublicKey>,
},
BlindedReceive {
- amt_msat: u64,
+ sender_intended_htlc_amt_msat: u64,
total_msat: u64,
- outgoing_cltv_value: u32,
+ cltv_expiry_height: u32,
encrypted_tlvs: Vec<u8>,
intro_node_blinding_point: Option<PublicKey>, // Set if the introduction node of the blinded path is the final node
}
let blinding_point: PublicKey = Readable::read(r)?;
let len: u16 = Readable::read(r)?;
let mut packet_reader = FixedLengthReader::new(r, len as u64);
- let onion_routing_packet: onion_message::Packet = <onion_message::Packet as LengthReadable>::read(&mut packet_reader)?;
+ let onion_routing_packet: onion_message::packet::Packet =
+ <onion_message::packet::Packet as LengthReadable>::read(&mut packet_reader)?;
Ok(Self {
blinding_point,
onion_routing_packet,
});
},
Self::Receive {
- ref payment_data, ref payment_metadata, ref keysend_preimage, amt_msat,
- outgoing_cltv_value, ref custom_tlvs,
+ ref payment_data, ref payment_metadata, ref keysend_preimage, sender_intended_htlc_amt_msat,
+ cltv_expiry_height, ref custom_tlvs,
} => {
// We need to update [`ln::outbound_payment::RecipientOnionFields::with_custom_tlvs`]
// to reject any reserved types in the experimental range if new ones are ever
let mut custom_tlvs: Vec<&(u64, Vec<u8>)> = custom_tlvs.iter().chain(keysend_tlv.iter()).collect();
custom_tlvs.sort_unstable_by_key(|(typ, _)| *typ);
_encode_varint_length_prefixed_tlv!(w, {
- (2, HighZeroBytesDroppedBigSize(*amt_msat), required),
- (4, HighZeroBytesDroppedBigSize(*outgoing_cltv_value), required),
+ (2, HighZeroBytesDroppedBigSize(*sender_intended_htlc_amt_msat), required),
+ (4, HighZeroBytesDroppedBigSize(*cltv_expiry_height), required),
(8, payment_data, option),
(16, payment_metadata.as_ref().map(|m| WithoutLength(m)), option)
}, custom_tlvs.iter());
});
},
Self::BlindedReceive {
- amt_msat, total_msat, outgoing_cltv_value, encrypted_tlvs,
+ sender_intended_htlc_amt_msat, total_msat, cltv_expiry_height, encrypted_tlvs,
intro_node_blinding_point,
} => {
_encode_varint_length_prefixed_tlv!(w, {
- (2, HighZeroBytesDroppedBigSize(*amt_msat), required),
- (4, HighZeroBytesDroppedBigSize(*outgoing_cltv_value), required),
+ (2, HighZeroBytesDroppedBigSize(*sender_intended_htlc_amt_msat), required),
+ (4, HighZeroBytesDroppedBigSize(*cltv_expiry_height), required),
(10, *encrypted_tlvs, required_vec),
(12, intro_node_blinding_point, option),
(18, HighZeroBytesDroppedBigSize(*total_msat), required)
payment_relay,
payment_constraints,
features,
- intro_node_blinding_point: intro_node_blinding_point.ok_or(DecodeError::InvalidValue)?,
+ intro_node_blinding_point,
})
},
ChaChaPolyReadAdapter { readable: BlindedPaymentTlvs::Receive(ReceiveTlvs {
})} => {
if total_msat.unwrap_or(0) > MAX_VALUE_MSAT { return Err(DecodeError::InvalidValue) }
Ok(Self::BlindedReceive {
- amt_msat: amt.ok_or(DecodeError::InvalidValue)?,
+ sender_intended_htlc_amt_msat: amt.ok_or(DecodeError::InvalidValue)?,
total_msat: total_msat.ok_or(DecodeError::InvalidValue)?,
- outgoing_cltv_value: cltv_value.ok_or(DecodeError::InvalidValue)?,
+ cltv_expiry_height: cltv_value.ok_or(DecodeError::InvalidValue)?,
payment_secret,
payment_constraints,
intro_node_blinding_point,
payment_data,
payment_metadata: payment_metadata.map(|w| w.0),
keysend_preimage,
- amt_msat: amt.ok_or(DecodeError::InvalidValue)?,
- outgoing_cltv_value: cltv_value.ok_or(DecodeError::InvalidValue)?,
+ sender_intended_htlc_amt_msat: amt.ok_or(DecodeError::InvalidValue)?,
+ cltv_expiry_height: cltv_value.ok_or(DecodeError::InvalidValue)?,
custom_tlvs,
})
}
payment_data: None,
payment_metadata: None,
keysend_preimage: None,
- amt_msat: 0x0badf00d01020304,
- outgoing_cltv_value: 0xffffffff,
+ sender_intended_htlc_amt_msat: 0x0badf00d01020304,
+ cltv_expiry_height: 0xffffffff,
custom_tlvs: vec![],
};
let encoded_value = outbound_msg.encode();
let node_signer = test_utils::TestKeysInterface::new(&[42; 32], Network::Testnet);
let inbound_msg = ReadableArgs::read(&mut Cursor::new(&target_value[..]), (None, &&node_signer)).unwrap();
if let msgs::InboundOnionPayload::Receive {
- payment_data: None, amt_msat, outgoing_cltv_value, ..
+ payment_data: None, sender_intended_htlc_amt_msat, cltv_expiry_height, ..
} = inbound_msg {
- assert_eq!(amt_msat, 0x0badf00d01020304);
- assert_eq!(outgoing_cltv_value, 0xffffffff);
+ assert_eq!(sender_intended_htlc_amt_msat, 0x0badf00d01020304);
+ assert_eq!(cltv_expiry_height, 0xffffffff);
} else { panic!(); }
}
}),
payment_metadata: None,
keysend_preimage: None,
- amt_msat: 0x0badf00d01020304,
- outgoing_cltv_value: 0xffffffff,
+ sender_intended_htlc_amt_msat: 0x0badf00d01020304,
+ cltv_expiry_height: 0xffffffff,
custom_tlvs: vec![],
};
let encoded_value = outbound_msg.encode();
payment_secret,
total_msat: 0x1badca1f
}),
- amt_msat, outgoing_cltv_value,
+ sender_intended_htlc_amt_msat, cltv_expiry_height,
payment_metadata: None,
keysend_preimage: None,
custom_tlvs,
} = inbound_msg {
assert_eq!(payment_secret, expected_payment_secret);
- assert_eq!(amt_msat, 0x0badf00d01020304);
- assert_eq!(outgoing_cltv_value, 0xffffffff);
+ assert_eq!(sender_intended_htlc_amt_msat, 0x0badf00d01020304);
+ assert_eq!(cltv_expiry_height, 0xffffffff);
assert_eq!(custom_tlvs, vec![]);
} else { panic!(); }
}
payment_metadata: None,
keysend_preimage: None,
custom_tlvs: bad_type_range_tlvs,
- amt_msat: 0x0badf00d01020304,
- outgoing_cltv_value: 0xffffffff,
+ sender_intended_htlc_amt_msat: 0x0badf00d01020304,
+ cltv_expiry_height: 0xffffffff,
};
let encoded_value = msg.encode();
let node_signer = test_utils::TestKeysInterface::new(&[42; 32], Network::Testnet);
payment_metadata: None,
keysend_preimage: None,
custom_tlvs: expected_custom_tlvs.clone(),
- amt_msat: 0x0badf00d01020304,
- outgoing_cltv_value: 0xffffffff,
+ sender_intended_htlc_amt_msat: 0x0badf00d01020304,
+ cltv_expiry_height: 0xffffffff,
};
let encoded_value = msg.encode();
let target_value = <Vec<u8>>::from_hex("2e02080badf00d010203040404ffffffffff0000000146c6616b021234ff0000000146c6616f084242424242424242").unwrap();
payment_metadata: None,
keysend_preimage: None,
custom_tlvs,
- amt_msat,
- outgoing_cltv_value,
+ sender_intended_htlc_amt_msat,
+ cltv_expiry_height: outgoing_cltv_value,
..
} = inbound_msg {
assert_eq!(custom_tlvs, expected_custom_tlvs);
- assert_eq!(amt_msat, 0x0badf00d01020304);
+ assert_eq!(sender_intended_htlc_amt_msat, 0x0badf00d01020304);
assert_eq!(outgoing_cltv_value, 0xffffffff);
} else { panic!(); }
}
--- /dev/null
+// This file is Copyright its original authors, visible in version control
+// history.
+//
+// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
+// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
+// You may not use this file except in accordance with one or both of these
+// licenses.
+
+//! Functional tests for the BOLT 12 Offers payment flow.
+//!
+//! [`ChannelManager`] provides utilities to create [`Offer`]s and [`Refund`]s along with utilities
+//! to initiate and request payment for them, respectively. It also manages the payment flow via
+//! implementing [`OffersMessageHandler`]. This module tests that functionality, including the
+//! resulting [`Event`] generation.
+//!
+//! Two-node success tests use an announced channel:
+//!
+//! Alice --- Bob
+//!
+//! While two-node failure tests use an unannounced channel:
+//!
+//! Alice ... Bob
+//!
+//! Six-node tests use unannounced channels for the sender and recipient and announced channels for
+//! the rest of the network.
+//!
+//! nodes[4]
+//! / \
+//! / \
+//! / \
+//! Alice ... Bob -------- Charlie ... David
+//! \ /
+//! \ /
+//! \ /
+//! nodes[5]
+//!
+//! Unnamed nodes are needed to ensure unannounced nodes can create two-hop blinded paths.
+//!
+//! Nodes without channels are disconnected and connected as needed to ensure that deterministic
+//! blinded paths are used.
+
+use core::time::Duration;
+use crate::blinded_path::BlindedPath;
+use crate::events::{Event, MessageSendEventsProvider, PaymentPurpose};
+use crate::ln::channelmanager::{PaymentId, RecentPaymentDetails, Retry, self};
+use crate::ln::functional_test_utils::*;
+use crate::ln::msgs::{ChannelMessageHandler, Init, OnionMessage, OnionMessageHandler};
+use crate::offers::invoice::Bolt12Invoice;
+use crate::offers::invoice_error::InvoiceError;
+use crate::offers::invoice_request::InvoiceRequest;
+use crate::offers::parse::Bolt12SemanticError;
+use crate::onion_message::messenger::PeeledOnion;
+use crate::onion_message::offers::OffersMessage;
+use crate::onion_message::packet::ParsedOnionMessageContents;
+
+use crate::prelude::*;
+
+/// Asserts that `$node`'s most recent payment (per `list_recent_payments`) matches the
+/// `$payment_state` variant and carries `$payment_id`; panics with a descriptive message if the
+/// state differs or no payments are recorded.
+macro_rules! expect_recent_payment {
+ ($node: expr, $payment_state: path, $payment_id: expr) => {
+ match $node.node.list_recent_payments().first() {
+ Some(&$payment_state { payment_id: actual_payment_id, .. }) => {
+ assert_eq!($payment_id, actual_payment_id);
+ },
+ Some(_) => panic!("Unexpected recent payment state"),
+ None => panic!("No recent payments"),
+ }
+ }
+}
+
+/// Connects `node_a` and `node_b` at both the `ChannelManager` and `OnionMessenger` layers,
+/// exchanging `Init` messages built from each side's negotiated features.
+// NOTE(review): the third `peer_connected` argument presumably marks which side is inbound —
+// confirm against the `peer_connected` signature.
+fn connect_peers<'a, 'b, 'c>(node_a: &Node<'a, 'b, 'c>, node_b: &Node<'a, 'b, 'c>) {
+ let node_id_a = node_a.node.get_our_node_id();
+ let node_id_b = node_b.node.get_our_node_id();
+
+ let init_a = Init {
+ features: node_a.init_features(&node_id_b),
+ networks: None,
+ remote_network_address: None,
+ };
+ let init_b = Init {
+ features: node_b.init_features(&node_id_a),
+ networks: None,
+ remote_network_address: None,
+ };
+
+ // Each side receives the *other* side's Init.
+ node_a.node.peer_connected(&node_id_b, &init_b, true).unwrap();
+ node_b.node.peer_connected(&node_id_a, &init_a, false).unwrap();
+ node_a.onion_messenger.peer_connected(&node_id_b, &init_b, true).unwrap();
+ node_b.onion_messenger.peer_connected(&node_id_a, &init_a, false).unwrap();
+}
+
+/// Disconnects `node_a` from every node in `peers`, on both the `ChannelManager` and the
+/// `OnionMessenger`, and symmetrically disconnects each peer from `node_a`.
+fn disconnect_peers<'a, 'b, 'c>(node_a: &Node<'a, 'b, 'c>, peers: &[&Node<'a, 'b, 'c>]) {
+ for node_b in peers {
+ node_a.node.peer_disconnected(&node_b.node.get_our_node_id());
+ node_b.node.peer_disconnected(&node_a.node.get_our_node_id());
+ node_a.onion_messenger.peer_disconnected(&node_b.node.get_our_node_id());
+ node_b.onion_messenger.peer_disconnected(&node_a.node.get_our_node_id());
+ }
+}
+
+/// Routes an already-initiated BOLT 12 payment from `node` along `path`, using the amount and
+/// payment hash taken from `invoice`. Expects exactly one pending message event (the first HTLC
+/// update) addressed to `path[0]` and one newly added channel monitor on `node`.
+fn route_bolt12_payment<'a, 'b, 'c>(
+ node: &Node<'a, 'b, 'c>, path: &[&Node<'a, 'b, 'c>], invoice: &Bolt12Invoice
+) {
+ // Monitor added when handling the invoice onion message.
+ check_added_monitors(node, 1);
+
+ let mut events = node.node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ let ev = remove_first_msg_event_to_node(&path[0].node.get_our_node_id(), &mut events);
+
+ // Use a fake payment_hash and bypass checking for the PaymentClaimable event since the
+ // invoice contains the payment_hash but it was encrypted inside an onion message.
+ let amount_msats = invoice.amount_msats();
+ let payment_hash = invoice.payment_hash();
+ do_pass_along_path(
+ node, path, amount_msats, payment_hash, None, ev, false, false, None, false
+ );
+}
+
+/// Claims a routed BOLT 12 payment at the final node of `path`, pulling the preimage out of that
+/// node's `PaymentClaimable` event. Panics if the event's purpose is not `InvoicePayment` with a
+/// preimage present.
+fn claim_bolt12_payment<'a, 'b, 'c>(node: &Node<'a, 'b, 'c>, path: &[&Node<'a, 'b, 'c>]) {
+ let recipient = &path[path.len() - 1];
+ match get_event!(recipient, Event::PaymentClaimable) {
+ Event::PaymentClaimable {
+ purpose: PaymentPurpose::InvoicePayment {
+ payment_preimage: Some(payment_preimage), ..
+ }, ..
+ } => claim_payment(node, path, payment_preimage),
+ _ => panic!(),
+ };
+}
+
+/// Peels `message` at `node` and returns the `InvoiceRequest` it contains along with the reply
+/// path, if any. Panics if the onion message holds anything other than an invoice request (an
+/// invoice, an invoice error, a custom message, or a forward), or if peeling fails.
+fn extract_invoice_request<'a, 'b, 'c>(
+ node: &Node<'a, 'b, 'c>, message: &OnionMessage
+) -> (InvoiceRequest, Option<BlindedPath>) {
+ match node.onion_messenger.peel_onion_message(message) {
+ Ok(PeeledOnion::Receive(message, _, reply_path)) => match message {
+ ParsedOnionMessageContents::Offers(offers_message) => match offers_message {
+ OffersMessage::InvoiceRequest(invoice_request) => (invoice_request, reply_path),
+ OffersMessage::Invoice(invoice) => panic!("Unexpected invoice: {:?}", invoice),
+ OffersMessage::InvoiceError(error) => panic!("Unexpected invoice_error: {:?}", error),
+ },
+ ParsedOnionMessageContents::Custom(message) => panic!("Unexpected custom message: {:?}", message),
+ },
+ Ok(PeeledOnion::Forward(_, _)) => panic!("Unexpected onion message forward"),
+ Err(e) => panic!("Failed to process onion message {:?}", e),
+ }
+}
+
+/// Peels `message` at `node` and returns the `Bolt12Invoice` it contains. Panics if the onion
+/// message holds anything else (an invoice request, an invoice error, a custom message, or a
+/// forward), or if peeling fails.
+fn extract_invoice<'a, 'b, 'c>(node: &Node<'a, 'b, 'c>, message: &OnionMessage) -> Bolt12Invoice {
+ match node.onion_messenger.peel_onion_message(message) {
+ Ok(PeeledOnion::Receive(message, _, _)) => match message {
+ ParsedOnionMessageContents::Offers(offers_message) => match offers_message {
+ OffersMessage::InvoiceRequest(invoice_request) => panic!("Unexpected invoice_request: {:?}", invoice_request),
+ OffersMessage::Invoice(invoice) => invoice,
+ OffersMessage::InvoiceError(error) => panic!("Unexpected invoice_error: {:?}", error),
+ },
+ ParsedOnionMessageContents::Custom(message) => panic!("Unexpected custom message: {:?}", message),
+ },
+ Ok(PeeledOnion::Forward(_, _)) => panic!("Unexpected onion message forward"),
+ Err(e) => panic!("Failed to process onion message {:?}", e),
+ }
+}
+
+/// Peels `message` at `node` and returns the `InvoiceError` it contains. Panics if the onion
+/// message holds anything else (an invoice request, an invoice, a custom message, or a forward),
+/// or if peeling fails.
+fn extract_invoice_error<'a, 'b, 'c>(
+ node: &Node<'a, 'b, 'c>, message: &OnionMessage
+) -> InvoiceError {
+ match node.onion_messenger.peel_onion_message(message) {
+ Ok(PeeledOnion::Receive(message, _, _)) => match message {
+ ParsedOnionMessageContents::Offers(offers_message) => match offers_message {
+ OffersMessage::InvoiceRequest(invoice_request) => panic!("Unexpected invoice_request: {:?}", invoice_request),
+ OffersMessage::Invoice(invoice) => panic!("Unexpected invoice: {:?}", invoice),
+ OffersMessage::InvoiceError(error) => error,
+ },
+ ParsedOnionMessageContents::Custom(message) => panic!("Unexpected custom message: {:?}", message),
+ },
+ Ok(PeeledOnion::Forward(_, _)) => panic!("Unexpected onion message forward"),
+ Err(e) => panic!("Failed to process onion message {:?}", e),
+ }
+}
+
+/// Checks that an offer can be paid through blinded paths and that ephemeral pubkeys are used
+/// rather than exposing a node's pubkey.
+#[test]
+fn creates_and_pays_for_offer_using_two_hop_blinded_path() {
+ // Bob must forward to private channels since Alice's channel to him is unannounced.
+ let mut accept_forward_cfg = test_default_channel_config();
+ accept_forward_cfg.accept_forwards_to_priv_channels = true;
+
+ let mut features = channelmanager::provided_init_features(&accept_forward_cfg);
+ features.set_onion_messages_optional();
+ features.set_route_blinding_optional();
+
+ let chanmon_cfgs = create_chanmon_cfgs(6);
+ let node_cfgs = create_node_cfgs(6, &chanmon_cfgs);
+
+ *node_cfgs[1].override_init_features.borrow_mut() = Some(features);
+
+ let node_chanmgrs = create_node_chanmgrs(
+ 6, &node_cfgs, &[None, Some(accept_forward_cfg), None, None, None, None]
+ );
+ let nodes = create_network(6, &node_cfgs, &node_chanmgrs);
+
+ // Topology per the module docs: Alice--Bob and Charlie--David are unannounced; the rest are
+ // announced channels through nodes[4] and nodes[5].
+ create_unannounced_chan_between_nodes_with_value(&nodes, 0, 1, 10_000_000, 1_000_000_000);
+ create_unannounced_chan_between_nodes_with_value(&nodes, 2, 3, 10_000_000, 1_000_000_000);
+ create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 10_000_000, 1_000_000_000);
+ create_announced_chan_between_nodes_with_value(&nodes, 1, 4, 10_000_000, 1_000_000_000);
+ create_announced_chan_between_nodes_with_value(&nodes, 1, 5, 10_000_000, 1_000_000_000);
+ create_announced_chan_between_nodes_with_value(&nodes, 2, 4, 10_000_000, 1_000_000_000);
+ create_announced_chan_between_nodes_with_value(&nodes, 2, 5, 10_000_000, 1_000_000_000);
+
+ let (alice, bob, charlie, david) = (&nodes[0], &nodes[1], &nodes[2], &nodes[3]);
+ let alice_id = alice.node.get_our_node_id();
+ let bob_id = bob.node.get_our_node_id();
+ let charlie_id = charlie.node.get_our_node_id();
+ let david_id = david.node.get_our_node_id();
+
+ // Disconnect everything except Alice--Bob and Charlie--David so blinded-path selection is
+ // deterministic (see module docs).
+ disconnect_peers(alice, &[charlie, david, &nodes[4], &nodes[5]]);
+ disconnect_peers(david, &[bob, &nodes[4], &nodes[5]]);
+
+ // The offer must not reveal Alice's node id; its blinded paths are introduced at Bob.
+ let offer = alice.node
+ .create_offer_builder("coffee".to_string()).unwrap()
+ .amount_msats(10_000_000)
+ .build().unwrap();
+ assert_ne!(offer.signing_pubkey(), alice_id);
+ assert!(!offer.paths().is_empty());
+ for path in offer.paths() {
+ assert_eq!(path.introduction_node_id, bob_id);
+ }
+
+ let payment_id = PaymentId([1; 32]);
+ david.node.pay_for_offer(&offer, None, None, None, payment_id, Retry::Attempts(0), None)
+ .unwrap();
+ expect_recent_payment!(david, RecentPaymentDetails::AwaitingInvoice, payment_id);
+
+ // Connect David to Bob, the offer path's introduction node, to deliver the invoice_request.
+ connect_peers(david, bob);
+
+ let onion_message = david.onion_messenger.next_onion_message_for_peer(bob_id).unwrap();
+ bob.onion_messenger.handle_onion_message(&david_id, &onion_message);
+
+ connect_peers(alice, charlie);
+
+ let onion_message = bob.onion_messenger.next_onion_message_for_peer(alice_id).unwrap();
+ alice.onion_messenger.handle_onion_message(&bob_id, &onion_message);
+
+ // The invoice_request must not reveal David's node id; the reply path enters at Charlie.
+ let (invoice_request, reply_path) = extract_invoice_request(alice, &onion_message);
+ assert_eq!(invoice_request.amount_msats(), None);
+ assert_ne!(invoice_request.payer_id(), david_id);
+ assert_eq!(reply_path.unwrap().introduction_node_id, charlie_id);
+
+ let onion_message = alice.onion_messenger.next_onion_message_for_peer(charlie_id).unwrap();
+ charlie.onion_messenger.handle_onion_message(&alice_id, &onion_message);
+
+ let onion_message = charlie.onion_messenger.next_onion_message_for_peer(david_id).unwrap();
+ david.onion_messenger.handle_onion_message(&charlie_id, &onion_message);
+
+ // The invoice must likewise hide Alice's node id behind payment paths introduced at Bob.
+ let invoice = extract_invoice(david, &onion_message);
+ assert_eq!(invoice.amount_msats(), 10_000_000);
+ assert_ne!(invoice.signing_pubkey(), alice_id);
+ assert!(!invoice.payment_paths().is_empty());
+ for (_, path) in invoice.payment_paths() {
+ assert_eq!(path.introduction_node_id, bob_id);
+ }
+
+ route_bolt12_payment(david, &[charlie, bob, alice], &invoice);
+ expect_recent_payment!(david, RecentPaymentDetails::Pending, payment_id);
+
+ claim_bolt12_payment(david, &[charlie, bob, alice]);
+ expect_recent_payment!(david, RecentPaymentDetails::Fulfilled, payment_id);
+}
+
+/// Checks that a refund can be paid through blinded paths and that ephemeral pubkeys are used
+/// rather than exposing a node's pubkey.
+#[test]
+fn creates_and_pays_for_refund_using_two_hop_blinded_path() {
+ // Bob must forward to private channels since Alice's channel to him is unannounced.
+ let mut accept_forward_cfg = test_default_channel_config();
+ accept_forward_cfg.accept_forwards_to_priv_channels = true;
+
+ let mut features = channelmanager::provided_init_features(&accept_forward_cfg);
+ features.set_onion_messages_optional();
+ features.set_route_blinding_optional();
+
+ let chanmon_cfgs = create_chanmon_cfgs(6);
+ let node_cfgs = create_node_cfgs(6, &chanmon_cfgs);
+
+ *node_cfgs[1].override_init_features.borrow_mut() = Some(features);
+
+ let node_chanmgrs = create_node_chanmgrs(
+ 6, &node_cfgs, &[None, Some(accept_forward_cfg), None, None, None, None]
+ );
+ let nodes = create_network(6, &node_cfgs, &node_chanmgrs);
+
+ // Topology per the module docs: Alice--Bob and Charlie--David are unannounced; the rest are
+ // announced channels through nodes[4] and nodes[5].
+ create_unannounced_chan_between_nodes_with_value(&nodes, 0, 1, 10_000_000, 1_000_000_000);
+ create_unannounced_chan_between_nodes_with_value(&nodes, 2, 3, 10_000_000, 1_000_000_000);
+ create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 10_000_000, 1_000_000_000);
+ create_announced_chan_between_nodes_with_value(&nodes, 1, 4, 10_000_000, 1_000_000_000);
+ create_announced_chan_between_nodes_with_value(&nodes, 1, 5, 10_000_000, 1_000_000_000);
+ create_announced_chan_between_nodes_with_value(&nodes, 2, 4, 10_000_000, 1_000_000_000);
+ create_announced_chan_between_nodes_with_value(&nodes, 2, 5, 10_000_000, 1_000_000_000);
+
+ let (alice, bob, charlie, david) = (&nodes[0], &nodes[1], &nodes[2], &nodes[3]);
+ let alice_id = alice.node.get_our_node_id();
+ let bob_id = bob.node.get_our_node_id();
+ let charlie_id = charlie.node.get_our_node_id();
+ let david_id = david.node.get_our_node_id();
+
+ // Disconnect everything except Alice--Bob and Charlie--David so blinded-path selection is
+ // deterministic (see module docs).
+ disconnect_peers(alice, &[charlie, david, &nodes[4], &nodes[5]]);
+ disconnect_peers(david, &[bob, &nodes[4], &nodes[5]]);
+
+ // The refund must not reveal David's node id; its blinded paths are introduced at Charlie.
+ let absolute_expiry = Duration::from_secs(u64::MAX);
+ let payment_id = PaymentId([1; 32]);
+ let refund = david.node
+ .create_refund_builder(
+ "refund".to_string(), 10_000_000, absolute_expiry, payment_id, Retry::Attempts(0), None
+ )
+ .unwrap()
+ .build().unwrap();
+ assert_eq!(refund.amount_msats(), 10_000_000);
+ assert_eq!(refund.absolute_expiry(), Some(absolute_expiry));
+ assert_ne!(refund.payer_id(), david_id);
+ assert!(!refund.paths().is_empty());
+ for path in refund.paths() {
+ assert_eq!(path.introduction_node_id, charlie_id);
+ }
+ expect_recent_payment!(david, RecentPaymentDetails::AwaitingInvoice, payment_id);
+
+ alice.node.request_refund_payment(&refund).unwrap();
+
+ // Connect Alice to Charlie, the refund path's introduction node, to deliver the invoice.
+ connect_peers(alice, charlie);
+
+ let onion_message = alice.onion_messenger.next_onion_message_for_peer(charlie_id).unwrap();
+ charlie.onion_messenger.handle_onion_message(&alice_id, &onion_message);
+
+ let onion_message = charlie.onion_messenger.next_onion_message_for_peer(david_id).unwrap();
+ david.onion_messenger.handle_onion_message(&charlie_id, &onion_message);
+
+ // The invoice must hide Alice's node id behind payment paths introduced at Bob.
+ let invoice = extract_invoice(david, &onion_message);
+ assert_eq!(invoice.amount_msats(), 10_000_000);
+ assert_ne!(invoice.signing_pubkey(), alice_id);
+ assert!(!invoice.payment_paths().is_empty());
+ for (_, path) in invoice.payment_paths() {
+ assert_eq!(path.introduction_node_id, bob_id);
+ }
+
+ route_bolt12_payment(david, &[charlie, bob, alice], &invoice);
+ expect_recent_payment!(david, RecentPaymentDetails::Pending, payment_id);
+
+ claim_bolt12_payment(david, &[charlie, bob, alice]);
+ expect_recent_payment!(david, RecentPaymentDetails::Fulfilled, payment_id);
+}
+
+/// Checks that an offer can be paid through a one-hop blinded path and that ephemeral pubkeys are
+/// used rather than exposing a node's pubkey. However, the node's pubkey is still used as the
+/// introduction node of the blinded path.
+#[test]
+fn creates_and_pays_for_offer_using_one_hop_blinded_path() {
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+ let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+ create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 10_000_000, 1_000_000_000);
+
+ let alice = &nodes[0];
+ let alice_id = alice.node.get_our_node_id();
+ let bob = &nodes[1];
+ let bob_id = bob.node.get_our_node_id();
+
+ // The offer's signing pubkey is ephemeral, but with only one hop Alice herself is the
+ // blinded path's introduction node.
+ let offer = alice.node
+ .create_offer_builder("coffee".to_string()).unwrap()
+ .amount_msats(10_000_000)
+ .build().unwrap();
+ assert_ne!(offer.signing_pubkey(), alice_id);
+ assert!(!offer.paths().is_empty());
+ for path in offer.paths() {
+ assert_eq!(path.introduction_node_id, alice_id);
+ }
+
+ let payment_id = PaymentId([1; 32]);
+ bob.node.pay_for_offer(&offer, None, None, None, payment_id, Retry::Attempts(0), None).unwrap();
+ expect_recent_payment!(bob, RecentPaymentDetails::AwaitingInvoice, payment_id);
+
+ let onion_message = bob.onion_messenger.next_onion_message_for_peer(alice_id).unwrap();
+ alice.onion_messenger.handle_onion_message(&bob_id, &onion_message);
+
+ // The invoice_request uses an ephemeral payer id; Bob introduces his own reply path.
+ let (invoice_request, reply_path) = extract_invoice_request(alice, &onion_message);
+ assert_eq!(invoice_request.amount_msats(), None);
+ assert_ne!(invoice_request.payer_id(), bob_id);
+ assert_eq!(reply_path.unwrap().introduction_node_id, bob_id);
+
+ let onion_message = alice.onion_messenger.next_onion_message_for_peer(bob_id).unwrap();
+ bob.onion_messenger.handle_onion_message(&alice_id, &onion_message);
+
+ let invoice = extract_invoice(bob, &onion_message);
+ assert_eq!(invoice.amount_msats(), 10_000_000);
+ assert_ne!(invoice.signing_pubkey(), alice_id);
+ assert!(!invoice.payment_paths().is_empty());
+ for (_, path) in invoice.payment_paths() {
+ assert_eq!(path.introduction_node_id, alice_id);
+ }
+
+ route_bolt12_payment(bob, &[alice], &invoice);
+ expect_recent_payment!(bob, RecentPaymentDetails::Pending, payment_id);
+
+ claim_bolt12_payment(bob, &[alice]);
+ expect_recent_payment!(bob, RecentPaymentDetails::Fulfilled, payment_id);
+}
+
+/// Checks that a refund can be paid through a one-hop blinded path and that ephemeral pubkeys are
+/// used rather than exposing a node's pubkey. However, the node's pubkey is still used as the
+/// introduction node of the blinded path.
+#[test]
+fn creates_and_pays_for_refund_using_one_hop_blinded_path() {
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+ let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+ create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 10_000_000, 1_000_000_000);
+
+ let alice = &nodes[0];
+ let alice_id = alice.node.get_our_node_id();
+ let bob = &nodes[1];
+ let bob_id = bob.node.get_our_node_id();
+
+ // The refund's payer id is ephemeral, but with only one hop Bob himself is the blinded
+ // path's introduction node.
+ let absolute_expiry = Duration::from_secs(u64::MAX);
+ let payment_id = PaymentId([1; 32]);
+ let refund = bob.node
+ .create_refund_builder(
+ "refund".to_string(), 10_000_000, absolute_expiry, payment_id, Retry::Attempts(0), None
+ )
+ .unwrap()
+ .build().unwrap();
+ assert_eq!(refund.amount_msats(), 10_000_000);
+ assert_eq!(refund.absolute_expiry(), Some(absolute_expiry));
+ assert_ne!(refund.payer_id(), bob_id);
+ assert!(!refund.paths().is_empty());
+ for path in refund.paths() {
+ assert_eq!(path.introduction_node_id, bob_id);
+ }
+ expect_recent_payment!(bob, RecentPaymentDetails::AwaitingInvoice, payment_id);
+
+ alice.node.request_refund_payment(&refund).unwrap();
+
+ let onion_message = alice.onion_messenger.next_onion_message_for_peer(bob_id).unwrap();
+ bob.onion_messenger.handle_onion_message(&alice_id, &onion_message);
+
+ // The invoice uses an ephemeral signing pubkey with Alice as the payment paths' intro node.
+ let invoice = extract_invoice(bob, &onion_message);
+ assert_eq!(invoice.amount_msats(), 10_000_000);
+ assert_ne!(invoice.signing_pubkey(), alice_id);
+ assert!(!invoice.payment_paths().is_empty());
+ for (_, path) in invoice.payment_paths() {
+ assert_eq!(path.introduction_node_id, alice_id);
+ }
+
+ route_bolt12_payment(bob, &[alice], &invoice);
+ expect_recent_payment!(bob, RecentPaymentDetails::Pending, payment_id);
+
+ claim_bolt12_payment(bob, &[alice]);
+ expect_recent_payment!(bob, RecentPaymentDetails::Fulfilled, payment_id);
+}
+
+/// Checks that an invoice for an offer without any blinded paths can be requested. Note that while
+/// the request is sent directly using the node's pubkey, the response and the payment still use
+/// blinded paths as required by the spec.
+#[test]
+fn pays_for_offer_without_blinded_paths() {
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+ let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+ create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 10_000_000, 1_000_000_000);
+
+ let alice = &nodes[0];
+ let alice_id = alice.node.get_our_node_id();
+ let bob = &nodes[1];
+ let bob_id = bob.node.get_our_node_id();
+
+ let offer = alice.node
+ .create_offer_builder("coffee".to_string()).unwrap()
+ .clear_paths()
+ .amount_msats(10_000_000)
+ .build().unwrap();
+ assert_eq!(offer.signing_pubkey(), alice_id);
+ assert!(offer.paths().is_empty());
+
+ let payment_id = PaymentId([1; 32]);
+ bob.node.pay_for_offer(&offer, None, None, None, payment_id, Retry::Attempts(0), None).unwrap();
+ expect_recent_payment!(bob, RecentPaymentDetails::AwaitingInvoice, payment_id);
+
+ let onion_message = bob.onion_messenger.next_onion_message_for_peer(alice_id).unwrap();
+ alice.onion_messenger.handle_onion_message(&bob_id, &onion_message);
+
+ let onion_message = alice.onion_messenger.next_onion_message_for_peer(bob_id).unwrap();
+ bob.onion_messenger.handle_onion_message(&alice_id, &onion_message);
+
+ let invoice = extract_invoice(bob, &onion_message);
+ route_bolt12_payment(bob, &[alice], &invoice);
+ expect_recent_payment!(bob, RecentPaymentDetails::Pending, payment_id);
+
+ claim_bolt12_payment(bob, &[alice]);
+ expect_recent_payment!(bob, RecentPaymentDetails::Fulfilled, payment_id);
+}
+
+/// Checks that a refund without any blinded paths can be paid. Note that while the invoice is sent
+/// directly using the node's pubkey, the payment still uses blinded paths as required by the spec.
+#[test]
+fn pays_for_refund_without_blinded_paths() {
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+ let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+ create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 10_000_000, 1_000_000_000);
+
+ let alice = &nodes[0];
+ let alice_id = alice.node.get_our_node_id();
+ let bob = &nodes[1];
+ let bob_id = bob.node.get_our_node_id();
+
+ let absolute_expiry = Duration::from_secs(u64::MAX);
+ let payment_id = PaymentId([1; 32]);
+ let refund = bob.node
+ .create_refund_builder(
+ "refund".to_string(), 10_000_000, absolute_expiry, payment_id, Retry::Attempts(0), None
+ )
+ .unwrap()
+ .clear_paths()
+ .build().unwrap();
+ assert_eq!(refund.payer_id(), bob_id);
+ assert!(refund.paths().is_empty());
+ expect_recent_payment!(bob, RecentPaymentDetails::AwaitingInvoice, payment_id);
+
+ alice.node.request_refund_payment(&refund).unwrap();
+
+ let onion_message = alice.onion_messenger.next_onion_message_for_peer(bob_id).unwrap();
+ bob.onion_messenger.handle_onion_message(&alice_id, &onion_message);
+
+ let invoice = extract_invoice(bob, &onion_message);
+ route_bolt12_payment(bob, &[alice], &invoice);
+ expect_recent_payment!(bob, RecentPaymentDetails::Pending, payment_id);
+
+ claim_bolt12_payment(bob, &[alice]);
+ expect_recent_payment!(bob, RecentPaymentDetails::Fulfilled, payment_id);
+}
+
+/// Fails creating an offer when a blinded path cannot be created without exposing the node's id.
+#[test]
+fn fails_creating_offer_without_blinded_paths() {
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+ let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+ create_unannounced_chan_between_nodes_with_value(&nodes, 0, 1, 10_000_000, 1_000_000_000);
+
+ match nodes[0].node.create_offer_builder("coffee".to_string()) {
+ Ok(_) => panic!("Expected error"),
+ Err(e) => assert_eq!(e, Bolt12SemanticError::MissingPaths),
+ }
+}
+
+/// Fails creating a refund when a blinded path cannot be created without exposing the node's id.
+#[test]
+fn fails_creating_refund_without_blinded_paths() {
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+ let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+ create_unannounced_chan_between_nodes_with_value(&nodes, 0, 1, 10_000_000, 1_000_000_000);
+
+ let absolute_expiry = Duration::from_secs(u64::MAX);
+ let payment_id = PaymentId([1; 32]);
+
+ match nodes[0].node.create_refund_builder(
+ "refund".to_string(), 10_000, absolute_expiry, payment_id, Retry::Attempts(0), None
+ ) {
+ Ok(_) => panic!("Expected error"),
+ Err(e) => assert_eq!(e, Bolt12SemanticError::MissingPaths),
+ }
+
+ assert!(nodes[0].node.list_recent_payments().is_empty());
+}
+
+/// Fails creating an invoice request when a blinded reply path cannot be created without exposing
+/// the node's id.
+#[test]
+fn fails_creating_invoice_request_without_blinded_reply_path() {
+ let chanmon_cfgs = create_chanmon_cfgs(6);
+ let node_cfgs = create_node_cfgs(6, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(6, &node_cfgs, &[None, None, None, None, None, None]);
+ let nodes = create_network(6, &node_cfgs, &node_chanmgrs);
+
+ create_unannounced_chan_between_nodes_with_value(&nodes, 0, 1, 10_000_000, 1_000_000_000);
+ create_unannounced_chan_between_nodes_with_value(&nodes, 2, 3, 10_000_000, 1_000_000_000);
+ create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 10_000_000, 1_000_000_000);
+ create_announced_chan_between_nodes_with_value(&nodes, 1, 4, 10_000_000, 1_000_000_000);
+ create_announced_chan_between_nodes_with_value(&nodes, 1, 5, 10_000_000, 1_000_000_000);
+
+ let (alice, bob, charlie, david) = (&nodes[0], &nodes[1], &nodes[2], &nodes[3]);
+
+ disconnect_peers(alice, &[charlie, david, &nodes[4], &nodes[5]]);
+ disconnect_peers(david, &[bob, &nodes[4], &nodes[5]]);
+
+ let offer = alice.node
+ .create_offer_builder("coffee".to_string()).unwrap()
+ .amount_msats(10_000_000)
+ .build().unwrap();
+
+ let payment_id = PaymentId([1; 32]);
+
+ match david.node.pay_for_offer(&offer, None, None, None, payment_id, Retry::Attempts(0), None) {
+ Ok(_) => panic!("Expected error"),
+ Err(e) => assert_eq!(e, Bolt12SemanticError::MissingPaths),
+ }
+
+ assert!(nodes[0].node.list_recent_payments().is_empty());
+}
+
+#[test]
+fn fails_creating_invoice_request_with_duplicate_payment_id() {
+ let chanmon_cfgs = create_chanmon_cfgs(6);
+ let node_cfgs = create_node_cfgs(6, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(6, &node_cfgs, &[None, None, None, None, None, None]);
+ let nodes = create_network(6, &node_cfgs, &node_chanmgrs);
+
+ create_unannounced_chan_between_nodes_with_value(&nodes, 0, 1, 10_000_000, 1_000_000_000);
+ create_unannounced_chan_between_nodes_with_value(&nodes, 2, 3, 10_000_000, 1_000_000_000);
+ create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 10_000_000, 1_000_000_000);
+ create_announced_chan_between_nodes_with_value(&nodes, 1, 4, 10_000_000, 1_000_000_000);
+ create_announced_chan_between_nodes_with_value(&nodes, 1, 5, 10_000_000, 1_000_000_000);
+ create_announced_chan_between_nodes_with_value(&nodes, 2, 4, 10_000_000, 1_000_000_000);
+ create_announced_chan_between_nodes_with_value(&nodes, 2, 5, 10_000_000, 1_000_000_000);
+
+ let (alice, _bob, charlie, david) = (&nodes[0], &nodes[1], &nodes[2], &nodes[3]);
+
+ disconnect_peers(alice, &[charlie, david, &nodes[4], &nodes[5]]);
+
+ let offer = alice.node
+ .create_offer_builder("coffee".to_string()).unwrap()
+ .amount_msats(10_000_000)
+ .build().unwrap();
+
+ let payment_id = PaymentId([1; 32]);
+ assert!(
+ david.node.pay_for_offer(
+ &offer, None, None, None, payment_id, Retry::Attempts(0), None
+ ).is_ok()
+ );
+ expect_recent_payment!(david, RecentPaymentDetails::AwaitingInvoice, payment_id);
+
+ match david.node.pay_for_offer(&offer, None, None, None, payment_id, Retry::Attempts(0), None) {
+ Ok(_) => panic!("Expected error"),
+ Err(e) => assert_eq!(e, Bolt12SemanticError::DuplicatePaymentId),
+ }
+
+ expect_recent_payment!(david, RecentPaymentDetails::AwaitingInvoice, payment_id);
+}
+
+#[test]
+fn fails_creating_refund_with_duplicate_payment_id() {
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+ let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+ create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 10_000_000, 1_000_000_000);
+
+ let absolute_expiry = Duration::from_secs(u64::MAX);
+ let payment_id = PaymentId([1; 32]);
+ assert!(
+ nodes[0].node.create_refund_builder(
+ "refund".to_string(), 10_000, absolute_expiry, payment_id, Retry::Attempts(0), None
+ ).is_ok()
+ );
+ expect_recent_payment!(nodes[0], RecentPaymentDetails::AwaitingInvoice, payment_id);
+
+ match nodes[0].node.create_refund_builder(
+ "refund".to_string(), 10_000, absolute_expiry, payment_id, Retry::Attempts(0), None
+ ) {
+ Ok(_) => panic!("Expected error"),
+ Err(e) => assert_eq!(e, Bolt12SemanticError::DuplicatePaymentId),
+ }
+
+ expect_recent_payment!(nodes[0], RecentPaymentDetails::AwaitingInvoice, payment_id);
+}
+
+#[test]
+fn fails_sending_invoice_without_blinded_payment_paths_for_offer() {
+ let mut accept_forward_cfg = test_default_channel_config();
+ accept_forward_cfg.accept_forwards_to_priv_channels = true;
+
+ // Clearing route_blinding prevents forming any payment paths since the node is unannounced.
+ let mut features = channelmanager::provided_init_features(&accept_forward_cfg);
+ features.set_onion_messages_optional();
+ features.clear_route_blinding();
+
+ let chanmon_cfgs = create_chanmon_cfgs(6);
+ let node_cfgs = create_node_cfgs(6, &chanmon_cfgs);
+
+ *node_cfgs[1].override_init_features.borrow_mut() = Some(features);
+
+ let node_chanmgrs = create_node_chanmgrs(
+ 6, &node_cfgs, &[None, Some(accept_forward_cfg), None, None, None, None]
+ );
+ let nodes = create_network(6, &node_cfgs, &node_chanmgrs);
+
+ create_unannounced_chan_between_nodes_with_value(&nodes, 0, 1, 10_000_000, 1_000_000_000);
+ create_unannounced_chan_between_nodes_with_value(&nodes, 2, 3, 10_000_000, 1_000_000_000);
+ create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 10_000_000, 1_000_000_000);
+ create_announced_chan_between_nodes_with_value(&nodes, 1, 4, 10_000_000, 1_000_000_000);
+ create_announced_chan_between_nodes_with_value(&nodes, 1, 5, 10_000_000, 1_000_000_000);
+ create_announced_chan_between_nodes_with_value(&nodes, 2, 4, 10_000_000, 1_000_000_000);
+ create_announced_chan_between_nodes_with_value(&nodes, 2, 5, 10_000_000, 1_000_000_000);
+
+ let (alice, bob, charlie, david) = (&nodes[0], &nodes[1], &nodes[2], &nodes[3]);
+ let alice_id = alice.node.get_our_node_id();
+ let bob_id = bob.node.get_our_node_id();
+ let charlie_id = charlie.node.get_our_node_id();
+ let david_id = david.node.get_our_node_id();
+
+ disconnect_peers(alice, &[charlie, david, &nodes[4], &nodes[5]]);
+ disconnect_peers(david, &[bob, &nodes[4], &nodes[5]]);
+
+ let offer = alice.node
+ .create_offer_builder("coffee".to_string()).unwrap()
+ .amount_msats(10_000_000)
+ .build().unwrap();
+
+ let payment_id = PaymentId([1; 32]);
+ david.node.pay_for_offer(&offer, None, None, None, payment_id, Retry::Attempts(0), None)
+ .unwrap();
+
+ connect_peers(david, bob);
+
+ let onion_message = david.onion_messenger.next_onion_message_for_peer(bob_id).unwrap();
+ bob.onion_messenger.handle_onion_message(&david_id, &onion_message);
+
+ connect_peers(alice, charlie);
+
+ let onion_message = bob.onion_messenger.next_onion_message_for_peer(alice_id).unwrap();
+ alice.onion_messenger.handle_onion_message(&bob_id, &onion_message);
+
+ let onion_message = alice.onion_messenger.next_onion_message_for_peer(charlie_id).unwrap();
+ charlie.onion_messenger.handle_onion_message(&alice_id, &onion_message);
+
+ let onion_message = charlie.onion_messenger.next_onion_message_for_peer(david_id).unwrap();
+ david.onion_messenger.handle_onion_message(&charlie_id, &onion_message);
+
+ let invoice_error = extract_invoice_error(david, &onion_message);
+ assert_eq!(invoice_error, InvoiceError::from(Bolt12SemanticError::MissingPaths));
+}
+
+#[test]
+fn fails_sending_invoice_without_blinded_payment_paths_for_refund() {
+ let mut accept_forward_cfg = test_default_channel_config();
+ accept_forward_cfg.accept_forwards_to_priv_channels = true;
+
+ // Clearing route_blinding prevents forming any payment paths since the node is unannounced.
+ let mut features = channelmanager::provided_init_features(&accept_forward_cfg);
+ features.set_onion_messages_optional();
+ features.clear_route_blinding();
+
+ let chanmon_cfgs = create_chanmon_cfgs(6);
+ let node_cfgs = create_node_cfgs(6, &chanmon_cfgs);
+
+ *node_cfgs[1].override_init_features.borrow_mut() = Some(features);
+
+ let node_chanmgrs = create_node_chanmgrs(
+ 6, &node_cfgs, &[None, Some(accept_forward_cfg), None, None, None, None]
+ );
+ let nodes = create_network(6, &node_cfgs, &node_chanmgrs);
+
+ create_unannounced_chan_between_nodes_with_value(&nodes, 0, 1, 10_000_000, 1_000_000_000);
+ create_unannounced_chan_between_nodes_with_value(&nodes, 2, 3, 10_000_000, 1_000_000_000);
+ create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 10_000_000, 1_000_000_000);
+ create_announced_chan_between_nodes_with_value(&nodes, 1, 4, 10_000_000, 1_000_000_000);
+ create_announced_chan_between_nodes_with_value(&nodes, 1, 5, 10_000_000, 1_000_000_000);
+ create_announced_chan_between_nodes_with_value(&nodes, 2, 4, 10_000_000, 1_000_000_000);
+ create_announced_chan_between_nodes_with_value(&nodes, 2, 5, 10_000_000, 1_000_000_000);
+
+ let (alice, bob, charlie, david) = (&nodes[0], &nodes[1], &nodes[2], &nodes[3]);
+
+ disconnect_peers(alice, &[charlie, david, &nodes[4], &nodes[5]]);
+ disconnect_peers(david, &[bob, &nodes[4], &nodes[5]]);
+
+ let absolute_expiry = Duration::from_secs(u64::MAX);
+ let payment_id = PaymentId([1; 32]);
+ let refund = david.node
+ .create_refund_builder(
+ "refund".to_string(), 10_000_000, absolute_expiry, payment_id, Retry::Attempts(0), None
+ )
+ .unwrap()
+ .build().unwrap();
+
+ match alice.node.request_refund_payment(&refund) {
+ Ok(_) => panic!("Expected error"),
+ Err(e) => assert_eq!(e, Bolt12SemanticError::MissingPaths),
+ }
+}
+
+#[test]
+fn fails_paying_invoice_more_than_once() {
+ let mut accept_forward_cfg = test_default_channel_config();
+ accept_forward_cfg.accept_forwards_to_priv_channels = true;
+
+ let mut features = channelmanager::provided_init_features(&accept_forward_cfg);
+ features.set_onion_messages_optional();
+ features.set_route_blinding_optional();
+
+ let chanmon_cfgs = create_chanmon_cfgs(6);
+ let node_cfgs = create_node_cfgs(6, &chanmon_cfgs);
+
+ *node_cfgs[1].override_init_features.borrow_mut() = Some(features);
+
+ let node_chanmgrs = create_node_chanmgrs(
+ 6, &node_cfgs, &[None, Some(accept_forward_cfg), None, None, None, None]
+ );
+ let nodes = create_network(6, &node_cfgs, &node_chanmgrs);
+
+ create_unannounced_chan_between_nodes_with_value(&nodes, 0, 1, 10_000_000, 1_000_000_000);
+ create_unannounced_chan_between_nodes_with_value(&nodes, 2, 3, 10_000_000, 1_000_000_000);
+ create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 10_000_000, 1_000_000_000);
+ create_announced_chan_between_nodes_with_value(&nodes, 1, 4, 10_000_000, 1_000_000_000);
+ create_announced_chan_between_nodes_with_value(&nodes, 1, 5, 10_000_000, 1_000_000_000);
+ create_announced_chan_between_nodes_with_value(&nodes, 2, 4, 10_000_000, 1_000_000_000);
+ create_announced_chan_between_nodes_with_value(&nodes, 2, 5, 10_000_000, 1_000_000_000);
+
+ let (alice, bob, charlie, david) = (&nodes[0], &nodes[1], &nodes[2], &nodes[3]);
+ let alice_id = alice.node.get_our_node_id();
+ let bob_id = bob.node.get_our_node_id();
+ let charlie_id = charlie.node.get_our_node_id();
+ let david_id = david.node.get_our_node_id();
+
+ disconnect_peers(alice, &[charlie, david, &nodes[4], &nodes[5]]);
+ disconnect_peers(david, &[bob, &nodes[4], &nodes[5]]);
+
+ let absolute_expiry = Duration::from_secs(u64::MAX);
+ let payment_id = PaymentId([1; 32]);
+ let refund = david.node
+ .create_refund_builder(
+ "refund".to_string(), 10_000_000, absolute_expiry, payment_id, Retry::Attempts(0), None
+ )
+ .unwrap()
+ .build().unwrap();
+ expect_recent_payment!(david, RecentPaymentDetails::AwaitingInvoice, payment_id);
+
+ // Alice sends the first invoice
+ alice.node.request_refund_payment(&refund).unwrap();
+
+ connect_peers(alice, charlie);
+
+ let onion_message = alice.onion_messenger.next_onion_message_for_peer(charlie_id).unwrap();
+ charlie.onion_messenger.handle_onion_message(&alice_id, &onion_message);
+
+ let onion_message = charlie.onion_messenger.next_onion_message_for_peer(david_id).unwrap();
+ david.onion_messenger.handle_onion_message(&charlie_id, &onion_message);
+
+ // David pays the first invoice
+ let invoice1 = extract_invoice(david, &onion_message);
+
+ route_bolt12_payment(david, &[charlie, bob, alice], &invoice1);
+ expect_recent_payment!(david, RecentPaymentDetails::Pending, payment_id);
+
+ claim_bolt12_payment(david, &[charlie, bob, alice]);
+ expect_recent_payment!(david, RecentPaymentDetails::Fulfilled, payment_id);
+
+ disconnect_peers(alice, &[charlie]);
+
+ // Alice sends the second invoice
+ alice.node.request_refund_payment(&refund).unwrap();
+
+ connect_peers(alice, charlie);
+ connect_peers(david, bob);
+
+ let onion_message = alice.onion_messenger.next_onion_message_for_peer(charlie_id).unwrap();
+ charlie.onion_messenger.handle_onion_message(&alice_id, &onion_message);
+
+ let onion_message = charlie.onion_messenger.next_onion_message_for_peer(david_id).unwrap();
+ david.onion_messenger.handle_onion_message(&charlie_id, &onion_message);
+
+ let invoice2 = extract_invoice(david, &onion_message);
+ assert_eq!(invoice1.payer_metadata(), invoice2.payer_metadata());
+
+ // David sends an error instead of paying the second invoice
+ let onion_message = david.onion_messenger.next_onion_message_for_peer(bob_id).unwrap();
+ bob.onion_messenger.handle_onion_message(&david_id, &onion_message);
+
+ let onion_message = bob.onion_messenger.next_onion_message_for_peer(alice_id).unwrap();
+ alice.onion_messenger.handle_onion_message(&bob_id, &onion_message);
+
+ let invoice_error = extract_invoice_error(alice, &onion_message);
+ assert_eq!(invoice_error, InvoiceError::from_string("DuplicateInvoice".to_string()));
+}
use crate::blinded_path::payment::{PaymentConstraints, PaymentRelay};
use crate::chain::channelmonitor::{HTLC_FAIL_BACK_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS};
use crate::ln::PaymentHash;
-use crate::ln::channelmanager::{BlindedForward, CLTV_FAR_FAR_AWAY, HTLCFailureMsg, MIN_CLTV_EXPIRY_DELTA, PendingHTLCInfo, PendingHTLCRouting};
+use crate::ln::channelmanager::{BlindedFailure, BlindedForward, CLTV_FAR_FAR_AWAY, HTLCFailureMsg, MIN_CLTV_EXPIRY_DELTA, PendingHTLCInfo, PendingHTLCRouting};
use crate::ln::features::BlindedHopFeatures;
use crate::ln::msgs;
use crate::ln::onion_utils;
/// Invalid inbound onion payment.
#[derive(Debug)]
-pub struct InboundOnionErr {
+pub struct InboundHTLCErr {
/// BOLT 4 error code.
pub err_code: u16,
/// Data attached to this error.
msg: &msgs::UpdateAddHTLC, hop_data: msgs::InboundOnionPayload, hop_hmac: [u8; 32],
new_packet_bytes: [u8; onion_utils::ONION_DATA_LEN], shared_secret: [u8; 32],
next_packet_pubkey_opt: Option<Result<PublicKey, secp256k1::Error>>
-) -> Result<PendingHTLCInfo, InboundOnionErr> {
+) -> Result<PendingHTLCInfo, InboundHTLCErr> {
debug_assert!(next_packet_pubkey_opt.is_some());
let outgoing_packet = msgs::OnionPacket {
version: 0,
};
let (
- short_channel_id, amt_to_forward, outgoing_cltv_value, inbound_blinding_point
+ short_channel_id, amt_to_forward, outgoing_cltv_value, intro_node_blinding_point
) = match hop_data {
msgs::InboundOnionPayload::Forward { short_channel_id, amt_to_forward, outgoing_cltv_value } =>
(short_channel_id, amt_to_forward, outgoing_cltv_value, None),
).map_err(|()| {
// We should be returning malformed here if `msg.blinding_point` is set, but this is
// unreachable right now since we checked it in `decode_update_add_htlc_onion`.
- InboundOnionErr {
+ InboundHTLCErr {
msg: "Underflow calculating outbound amount or cltv value for blinded forward",
err_code: INVALID_ONION_BLINDING,
err_data: vec![0; 32],
}
})?;
- (short_channel_id, amt_to_forward, outgoing_cltv_value, Some(intro_node_blinding_point))
+ (short_channel_id, amt_to_forward, outgoing_cltv_value, intro_node_blinding_point)
},
msgs::InboundOnionPayload::Receive { .. } | msgs::InboundOnionPayload::BlindedReceive { .. } =>
- return Err(InboundOnionErr {
+ return Err(InboundHTLCErr {
msg: "Final Node OnionHopData provided for us as an intermediary node",
err_code: 0x4000 | 22,
err_data: Vec::new(),
routing: PendingHTLCRouting::Forward {
onion_packet: outgoing_packet,
short_channel_id,
- blinded: inbound_blinding_point.map(|bp| BlindedForward { inbound_blinding_point: bp }),
+ blinded: intro_node_blinding_point.or(msg.blinding_point)
+ .map(|bp| BlindedForward {
+ inbound_blinding_point: bp,
+ failure: intro_node_blinding_point
+ .map(|_| BlindedFailure::FromIntroductionNode)
+ .unwrap_or(BlindedFailure::FromBlindedNode),
+ }),
},
payment_hash: msg.payment_hash,
incoming_shared_secret: shared_secret,
hop_data: msgs::InboundOnionPayload, shared_secret: [u8; 32], payment_hash: PaymentHash,
amt_msat: u64, cltv_expiry: u32, phantom_shared_secret: Option<[u8; 32]>, allow_underpay: bool,
counterparty_skimmed_fee_msat: Option<u64>, current_height: u32, accept_mpp_keysend: bool,
-) -> Result<PendingHTLCInfo, InboundOnionErr> {
+) -> Result<PendingHTLCInfo, InboundHTLCErr> {
let (
- payment_data, keysend_preimage, custom_tlvs, onion_amt_msat, outgoing_cltv_value,
+ payment_data, keysend_preimage, custom_tlvs, onion_amt_msat, onion_cltv_expiry,
payment_metadata, requires_blinded_error
) = match hop_data {
msgs::InboundOnionPayload::Receive {
- payment_data, keysend_preimage, custom_tlvs, amt_msat, outgoing_cltv_value, payment_metadata, ..
+ payment_data, keysend_preimage, custom_tlvs, sender_intended_htlc_amt_msat,
+ cltv_expiry_height, payment_metadata, ..
} =>
- (payment_data, keysend_preimage, custom_tlvs, amt_msat, outgoing_cltv_value, payment_metadata,
- false),
+ (payment_data, keysend_preimage, custom_tlvs, sender_intended_htlc_amt_msat,
+ cltv_expiry_height, payment_metadata, false),
msgs::InboundOnionPayload::BlindedReceive {
- amt_msat, total_msat, outgoing_cltv_value, payment_secret, intro_node_blinding_point,
- payment_constraints, ..
+ sender_intended_htlc_amt_msat, total_msat, cltv_expiry_height, payment_secret,
+ intro_node_blinding_point, payment_constraints, ..
} => {
- check_blinded_payment_constraints(amt_msat, cltv_expiry, &payment_constraints)
+ check_blinded_payment_constraints(
+ sender_intended_htlc_amt_msat, cltv_expiry, &payment_constraints
+ )
.map_err(|()| {
- InboundOnionErr {
+ InboundHTLCErr {
err_code: INVALID_ONION_BLINDING,
err_data: vec![0; 32],
msg: "Amount or cltv_expiry violated blinded payment constraints",
}
})?;
let payment_data = msgs::FinalOnionHopData { payment_secret, total_msat };
- (Some(payment_data), None, Vec::new(), amt_msat, outgoing_cltv_value, None,
- intro_node_blinding_point.is_none())
+ (Some(payment_data), None, Vec::new(), sender_intended_htlc_amt_msat, cltv_expiry_height,
+ None, intro_node_blinding_point.is_none())
}
msgs::InboundOnionPayload::Forward { .. } => {
- return Err(InboundOnionErr {
+ return Err(InboundHTLCErr {
err_code: 0x4000|22,
err_data: Vec::new(),
msg: "Got non final data with an HMAC of 0",
})
},
msgs::InboundOnionPayload::BlindedForward { .. } => {
- return Err(InboundOnionErr {
+ return Err(InboundHTLCErr {
err_code: INVALID_ONION_BLINDING,
err_data: vec![0; 32],
msg: "Got blinded non final data with an HMAC of 0",
}
};
// final_incorrect_cltv_expiry
- if outgoing_cltv_value > cltv_expiry {
- return Err(InboundOnionErr {
+ if onion_cltv_expiry > cltv_expiry {
+ return Err(InboundHTLCErr {
msg: "Upstream node set CLTV to less than the CLTV set by the sender",
err_code: 18,
err_data: cltv_expiry.to_be_bytes().to_vec()
let mut err_data = Vec::with_capacity(12);
err_data.extend_from_slice(&amt_msat.to_be_bytes());
err_data.extend_from_slice(¤t_height.to_be_bytes());
- return Err(InboundOnionErr {
+ return Err(InboundHTLCErr {
err_code: 0x4000 | 15, err_data,
msg: "The final CLTV expiry is too soon to handle",
});
(allow_underpay && onion_amt_msat >
amt_msat.saturating_add(counterparty_skimmed_fee_msat.unwrap_or(0)))
{
- return Err(InboundOnionErr {
+ return Err(InboundHTLCErr {
err_code: 19,
err_data: amt_msat.to_be_bytes().to_vec(),
msg: "Upstream node sent less than we were supposed to receive in payment",
// time discrepancies due to a hash collision with X.
let hashed_preimage = PaymentHash(Sha256::hash(&payment_preimage.0).to_byte_array());
if hashed_preimage != payment_hash {
- return Err(InboundOnionErr {
+ return Err(InboundHTLCErr {
err_code: 0x4000|22,
err_data: Vec::new(),
msg: "Payment preimage didn't match payment hash",
});
}
if !accept_mpp_keysend && payment_data.is_some() {
- return Err(InboundOnionErr {
+ return Err(InboundHTLCErr {
err_code: 0x4000|22,
err_data: Vec::new(),
msg: "We don't support MPP keysend payments",
payment_data,
payment_preimage,
payment_metadata,
- incoming_cltv_expiry: outgoing_cltv_value,
+ incoming_cltv_expiry: onion_cltv_expiry,
custom_tlvs,
}
} else if let Some(data) = payment_data {
PendingHTLCRouting::Receive {
payment_data: data,
payment_metadata,
- incoming_cltv_expiry: outgoing_cltv_value,
+ incoming_cltv_expiry: onion_cltv_expiry,
phantom_shared_secret,
custom_tlvs,
requires_blinded_error,
}
} else {
- return Err(InboundOnionErr {
+ return Err(InboundHTLCErr {
err_code: 0x4000|0x2000|3,
err_data: Vec::new(),
msg: "We require payment_secrets",
incoming_shared_secret: shared_secret,
incoming_amt_msat: Some(amt_msat),
outgoing_amt_msat: onion_amt_msat,
- outgoing_cltv_value,
+ outgoing_cltv_value: onion_cltv_expiry,
skimmed_fee_msat: counterparty_skimmed_fee_msat,
})
}
pub fn peel_payment_onion<NS: Deref, L: Deref, T: secp256k1::Verification>(
msg: &msgs::UpdateAddHTLC, node_signer: &NS, logger: &L, secp_ctx: &Secp256k1<T>,
cur_height: u32, accept_mpp_keysend: bool, allow_skimmed_fees: bool,
-) -> Result<PendingHTLCInfo, InboundOnionErr>
+) -> Result<PendingHTLCInfo, InboundHTLCErr>
where
NS::Target: NodeSigner,
L::Target: Logger,
HTLCFailureMsg::Relay(r) => (0x4000 | 22, r.reason.data),
};
let msg = "Failed to decode update add htlc onion";
- InboundOnionErr { msg, err_code, err_data }
+ InboundHTLCErr { msg, err_code, err_data }
})?;
Ok(match hop {
onion_utils::Hop::Forward { next_hop_data, next_hop_hmac, new_packet_bytes } => {
} = match next_packet_details_opt {
Some(next_packet_details) => next_packet_details,
// Forward should always include the next hop details
- None => return Err(InboundOnionErr {
+ None => return Err(InboundHTLCErr {
msg: "Failed to decode update add htlc onion",
err_code: 0x4000 | 22,
err_data: Vec::new(),
if let Err((err_msg, code)) = check_incoming_htlc_cltv(
cur_height, outgoing_cltv_value, msg.cltv_expiry
) {
- return Err(InboundOnionErr {
+ return Err(InboundHTLCErr {
msg: err_msg,
err_code: code,
err_data: Vec::new(),
use crate::routing::gossip::NetworkUpdate;
use crate::routing::router::{BlindedTail, Path, RouteHop};
use crate::sign::NodeSigner;
-use crate::util::chacha20::{ChaCha20, ChaChaReader};
+use crate::crypto::chacha20::ChaCha20;
+use crate::crypto::streams::ChaChaReader;
use crate::util::errors::{self, APIError};
use crate::util::ser::{Readable, ReadableArgs, Writeable, Writer, LengthCalculatingWriter};
use crate::util::logger::Logger;
for (i, blinded_hop) in hops.iter().enumerate() {
if i == hops.len() - 1 {
cur_value_msat += final_value_msat;
- cur_cltv += excess_final_cltv_expiry_delta;
res.push(msgs::OutboundOnionPayload::BlindedReceive {
- amt_msat: *final_value_msat,
+ sender_intended_htlc_amt_msat: *final_value_msat,
total_msat,
- outgoing_cltv_value: cltv,
+ cltv_expiry_height: cur_cltv + excess_final_cltv_expiry_delta,
encrypted_tlvs: blinded_hop.encrypted_payload.clone(),
intro_node_blinding_point: blinding_point.take(),
});
payment_metadata: recipient_onion.payment_metadata.take(),
keysend_preimage: *keysend_preimage,
custom_tlvs: recipient_onion.custom_tlvs.clone(),
- amt_msat: value_msat,
- outgoing_cltv_value: cltv,
+ sender_intended_htlc_amt_msat: value_msat,
+ cltv_expiry_height: cltv,
});
}
} else {
pub(crate) network_update: Option<NetworkUpdate>,
pub(crate) short_channel_id: Option<u64>,
pub(crate) payment_failed_permanently: bool,
+ pub(crate) failed_within_blinded_path: bool,
#[cfg(test)]
pub(crate) onion_error_code: Option<u16>,
#[cfg(test)]
network_update: Option<NetworkUpdate>,
short_channel_id: Option<u64>,
payment_failed_permanently: bool,
+ failed_within_blinded_path: bool,
}
let mut res: Option<FailureLearnings> = None;
let mut htlc_msat = *first_hop_htlc_msat;
error_code_ret = Some(BADONION | PERM | 24); // invalid_onion_blinding
error_packet_ret = Some(vec![0; 32]);
res = Some(FailureLearnings {
- network_update: None, short_channel_id: None, payment_failed_permanently: false
+ network_update: None, short_channel_id: None, payment_failed_permanently: false,
+ failed_within_blinded_path: true,
});
return
},
}
res = Some(FailureLearnings {
- network_update: None, short_channel_id: None, payment_failed_permanently: false
+ network_update: None, short_channel_id: None, payment_failed_permanently: false,
+ failed_within_blinded_path: true,
});
return
}
});
let short_channel_id = Some(route_hop.short_channel_id);
res = Some(FailureLearnings {
- network_update, short_channel_id, payment_failed_permanently: is_from_final_node
+ network_update, short_channel_id, payment_failed_permanently: is_from_final_node,
+ failed_within_blinded_path: false
});
return
}
res = Some(FailureLearnings {
network_update, short_channel_id,
- payment_failed_permanently: error_code & PERM == PERM && is_from_final_node
+ payment_failed_permanently: error_code & PERM == PERM && is_from_final_node,
+ failed_within_blinded_path: false
});
let (description, title) = errors::get_onion_error_description(error_code);
}
}).expect("Route that we sent via spontaneously grew invalid keys in the middle of it?");
if let Some(FailureLearnings {
- network_update, short_channel_id, payment_failed_permanently
+ network_update, short_channel_id, payment_failed_permanently, failed_within_blinded_path
}) = res {
DecodedOnionFailure {
- network_update, short_channel_id, payment_failed_permanently,
+ network_update, short_channel_id, payment_failed_permanently, failed_within_blinded_path,
#[cfg(test)]
onion_error_code: error_code_ret,
#[cfg(test)]
// payment not retryable only when garbage is from the final node
DecodedOnionFailure {
network_update: None, short_channel_id: None, payment_failed_permanently: is_from_final_node,
+ failed_within_blinded_path: false,
#[cfg(test)]
onion_error_code: None,
#[cfg(test)]
network_update: None,
payment_failed_permanently: false,
short_channel_id: Some(path.hops[0].short_channel_id),
+ failed_within_blinded_path: false,
#[cfg(test)]
onion_error_code: Some(*failure_code),
#[cfg(test)]
use crate::ln::channelmanager::{ChannelDetails, EventCompletionAction, HTLCSource, PaymentId};
use crate::ln::onion_utils::{DecodedOnionFailure, HTLCFailReason};
use crate::offers::invoice::Bolt12Invoice;
-use crate::routing::router::{InFlightHtlcs, Path, PaymentParameters, Route, RouteParameters, Router};
+use crate::routing::router::{BlindedTail, InFlightHtlcs, Path, PaymentParameters, Route, RouteParameters, Router};
use crate::util::errors::APIError;
use crate::util::logger::Logger;
use crate::util::time::Time;
-#[cfg(all(not(feature = "no-std"), test))]
+#[cfg(all(feature = "std", test))]
use crate::util::time::tests::SinceEpoch;
use crate::util::ser::ReadableArgs;
params.previously_failed_channels.push(scid);
}
}
+ pub fn insert_previously_failed_blinded_path(&mut self, blinded_tail: &BlindedTail) {
+ if let PendingOutboundPayment::Retryable { payment_params: Some(params), .. } = self {
+ params.insert_previously_failed_blinded_path(blinded_tail);
+ }
+ }
fn is_awaiting_invoice(&self) -> bool {
match self {
PendingOutboundPayment::AwaitingInvoice { .. } => true,
if insert_res {
if let PendingOutboundPayment::Retryable {
ref mut pending_amt_msat, ref mut pending_fee_msat,
- ref mut remaining_max_total_routing_fee_msat, ..
+ ref mut remaining_max_total_routing_fee_msat, ..
} = self {
*pending_amt_msat += path.final_value_msat();
let path_fee_msat = path.fee_msat();
/// retry, and may retry multiple failed HTLCs at once if they failed around the same time and
/// were retried along a route from a single call to [`Router::find_route_with_id`].
Attempts(u32),
- #[cfg(not(feature = "no-std"))]
+ #[cfg(feature = "std")]
/// Time elapsed before abandoning retries for a payment. At least one attempt at payment is made;
/// see [`PaymentParameters::expiry_time`] to avoid any attempt at payment after a specific time.
///
Timeout(core::time::Duration),
}
-#[cfg(feature = "no-std")]
+#[cfg(not(feature = "std"))]
impl_writeable_tlv_based_enum!(Retry,
;
(0, Attempts)
);
-#[cfg(not(feature = "no-std"))]
+#[cfg(feature = "std")]
impl_writeable_tlv_based_enum!(Retry,
;
(0, Attempts),
(Retry::Attempts(max_retry_count), PaymentAttempts { count, .. }) => {
max_retry_count > count
},
- #[cfg(all(not(feature = "no-std"), not(test)))]
+ #[cfg(all(feature = "std", not(test)))]
(Retry::Timeout(max_duration), PaymentAttempts { first_attempted_at, .. }) =>
*max_duration >= crate::util::time::MonotonicTime::now().duration_since(*first_attempted_at),
- #[cfg(all(not(feature = "no-std"), test))]
+ #[cfg(all(feature = "std", test))]
(Retry::Timeout(max_duration), PaymentAttempts { first_attempted_at, .. }) =>
*max_duration >= SinceEpoch::now().duration_since(*first_attempted_at),
}
/// it means the result of the first attempt is not known yet.
pub(crate) count: u32,
/// This field is only used when retry is `Retry::Timeout` which is only build with feature std
- #[cfg(not(feature = "no-std"))]
+ #[cfg(feature = "std")]
first_attempted_at: T,
- #[cfg(feature = "no-std")]
+ #[cfg(not(feature = "std"))]
phantom: core::marker::PhantomData<T>,
}
-#[cfg(not(any(feature = "no-std", test)))]
-type ConfiguredTime = crate::util::time::MonotonicTime;
-#[cfg(feature = "no-std")]
+#[cfg(not(feature = "std"))]
type ConfiguredTime = crate::util::time::Eternity;
-#[cfg(all(not(feature = "no-std"), test))]
+#[cfg(all(feature = "std", not(test)))]
+type ConfiguredTime = crate::util::time::MonotonicTime;
+#[cfg(all(feature = "std", test))]
type ConfiguredTime = SinceEpoch;
impl<T: Time> PaymentAttemptsUsingTime<T> {
pub(crate) fn new() -> Self {
PaymentAttemptsUsingTime {
count: 0,
- #[cfg(not(feature = "no-std"))]
+ #[cfg(feature = "std")]
first_attempted_at: T::now(),
- #[cfg(feature = "no-std")]
+ #[cfg(not(feature = "std"))]
phantom: core::marker::PhantomData,
}
}
impl<T: Time> Display for PaymentAttemptsUsingTime<T> {
fn fmt(&self, f: &mut Formatter) -> Result<(), fmt::Error> {
- #[cfg(feature = "no-std")]
+ #[cfg(not(feature = "std"))]
return write!(f, "attempts: {}", self.count);
- #[cfg(not(feature = "no-std"))]
+ #[cfg(feature = "std")]
return write!(
f,
"attempts: {}, duration: {}s",
#[cfg(test)]
let DecodedOnionFailure {
network_update, short_channel_id, payment_failed_permanently, onion_error_code,
- onion_error_data
+ onion_error_data, failed_within_blinded_path
} = onion_error.decode_onion_failure(secp_ctx, logger, &source);
#[cfg(not(test))]
- let DecodedOnionFailure { network_update, short_channel_id, payment_failed_permanently } =
- onion_error.decode_onion_failure(secp_ctx, logger, &source);
+ let DecodedOnionFailure {
+ network_update, short_channel_id, payment_failed_permanently, failed_within_blinded_path
+ } = onion_error.decode_onion_failure(secp_ctx, logger, &source);
let payment_is_probe = payment_is_probe(payment_hash, &payment_id, probing_cookie_secret);
let mut session_priv_bytes = [0; 32];
// next-hop is needlessly blaming us!
payment.get_mut().insert_previously_failed_scid(scid);
}
+ if failed_within_blinded_path {
+ debug_assert!(short_channel_id.is_none());
+ if let Some(bt) = &path.blinded_tail {
+ payment.get_mut().insert_previously_failed_blinded_path(&bt);
+ } else { debug_assert!(false); }
+ }
if payment_is_probe || !is_retryable_now || payment_failed_permanently {
let reason = if payment_failed_permanently {
let logger = test_utils::TestLogger::new();
let network_graph = Arc::new(NetworkGraph::new(Network::Testnet, &logger));
let scorer = RwLock::new(test_utils::TestScorer::new());
- let router = test_utils::TestRouter::new(network_graph, &scorer);
+ let router = test_utils::TestRouter::new(network_graph, &logger, &scorer);
let secp_ctx = Secp256k1::new();
let keys_manager = test_utils::TestKeysInterface::new(&[0; 32], Network::Testnet);
let logger = test_utils::TestLogger::new();
let network_graph = Arc::new(NetworkGraph::new(Network::Testnet, &logger));
let scorer = RwLock::new(test_utils::TestScorer::new());
- let router = test_utils::TestRouter::new(network_graph, &scorer);
+ let router = test_utils::TestRouter::new(network_graph, &logger, &scorer);
let secp_ctx = Secp256k1::new();
let keys_manager = test_utils::TestKeysInterface::new(&[0; 32], Network::Testnet);
let logger = test_utils::TestLogger::new();
let network_graph = Arc::new(NetworkGraph::new(Network::Testnet, &logger));
let scorer = RwLock::new(test_utils::TestScorer::new());
- let router = test_utils::TestRouter::new(network_graph, &scorer);
+ let router = test_utils::TestRouter::new(network_graph, &logger, &scorer);
let secp_ctx = Secp256k1::new();
let keys_manager = test_utils::TestKeysInterface::new(&[0; 32], Network::Testnet);
let logger = test_utils::TestLogger::new();
let network_graph = Arc::new(NetworkGraph::new(Network::Testnet, &logger));
let scorer = RwLock::new(test_utils::TestScorer::new());
- let router = test_utils::TestRouter::new(network_graph, &scorer);
+ let router = test_utils::TestRouter::new(network_graph, &logger, &scorer);
let keys_manager = test_utils::TestKeysInterface::new(&[0; 32], Network::Testnet);
let pending_events = Mutex::new(VecDeque::new());
let logger = test_utils::TestLogger::new();
let network_graph = Arc::new(NetworkGraph::new(Network::Testnet, &logger));
let scorer = RwLock::new(test_utils::TestScorer::new());
- let router = test_utils::TestRouter::new(network_graph, &scorer);
+ let router = test_utils::TestRouter::new(network_graph, &logger, &scorer);
let keys_manager = test_utils::TestKeysInterface::new(&[0; 32], Network::Testnet);
let pending_events = Mutex::new(VecDeque::new());
let logger = test_utils::TestLogger::new();
let network_graph = Arc::new(NetworkGraph::new(Network::Testnet, &logger));
let scorer = RwLock::new(test_utils::TestScorer::new());
- let router = test_utils::TestRouter::new(network_graph, &scorer);
+ let router = test_utils::TestRouter::new(network_graph, &logger, &scorer);
let keys_manager = test_utils::TestKeysInterface::new(&[0; 32], Network::Testnet);
let pending_events = Mutex::new(VecDeque::new());
let logger = test_utils::TestLogger::new();
let network_graph = Arc::new(NetworkGraph::new(Network::Testnet, &logger));
let scorer = RwLock::new(test_utils::TestScorer::new());
- let router = test_utils::TestRouter::new(network_graph, &scorer);
+ let router = test_utils::TestRouter::new(network_graph, &logger, &scorer);
let keys_manager = test_utils::TestKeysInterface::new(&[0; 32], Network::Testnet);
let pending_events = Mutex::new(VecDeque::new());
use crate::ln::functional_test_utils;
use crate::ln::functional_test_utils::*;
use crate::routing::gossip::NodeId;
+
#[cfg(feature = "std")]
-use std::time::{SystemTime, Instant, Duration};
-#[cfg(not(feature = "no-std"))]
-use crate::util::time::tests::SinceEpoch;
+use {
+ crate::util::time::tests::SinceEpoch,
+ std::time::{SystemTime, Instant, Duration},
+};
#[test]
fn mpp_failure() {
let mut msg_events = nodes[0].node.get_and_clear_pending_msg_events();
assert_eq!(msg_events.len(), 0);
} else if test == AutoRetry::FailTimeout {
- #[cfg(not(feature = "no-std"))] {
+ #[cfg(feature = "std")] {
// Ensure ChannelManager will not retry a payment if it times out due to Retry::Timeout.
nodes[0].node.send_payment(payment_hash, RecipientOnionFields::secret_only(payment_secret),
PaymentId(payment_hash.0), route_params, Retry::Timeout(Duration::from_secs(60))).unwrap();
use hex::DisplayHex;
-use crate::util::chacha20poly1305rfc::ChaCha20Poly1305RFC;
-use crate::util::crypto::hkdf_extract_expand_twice;
+use crate::crypto::chacha20poly1305rfc::ChaCha20Poly1305RFC;
+use crate::crypto::utils::hkdf_extract_expand_twice;
use crate::util::ser::VecWriter;
use core::ops::Deref;
nonce[4..].copy_from_slice(&n.to_le_bytes()[..]);
let mut chacha = ChaCha20Poly1305RFC::new(key, &nonce, h);
- if !chacha.decrypt(&cyphertext[0..cyphertext.len() - 16], res, &cyphertext[cyphertext.len() - 16..]) {
+ if chacha.variable_time_decrypt(&cyphertext[0..cyphertext.len() - 16], res, &cyphertext[cyphertext.len() - 16..]).is_err() {
return Err(LightningError{err: "Bad MAC".to_owned(), action: msgs::ErrorAction::DisconnectPeer{ msg: None }});
}
Ok(())
use crate::ln::features::{InitFeatures, NodeFeatures};
use crate::ln::msgs;
use crate::ln::msgs::{ChannelMessageHandler, LightningError, SocketAddress, OnionMessageHandler, RoutingMessageHandler};
-#[cfg(not(c_bindings))]
-use crate::ln::channelmanager::{SimpleArcChannelManager, SimpleRefChannelManager};
+use crate::util::macro_logger::DebugFundingChannelId;
use crate::util::ser::{VecWriter, Writeable, Writer};
use crate::ln::peer_channel_encryptor::{PeerChannelEncryptor, NextNoiseStep, MessageBuf, MSG_BUF_ALLOC_SIZE};
use crate::ln::wire;
use crate::ln::wire::{Encode, Type};
-#[cfg(not(c_bindings))]
-use crate::onion_message::{SimpleArcOnionMessenger, SimpleRefOnionMessenger};
-use crate::onion_message::{CustomOnionMessageHandler, OffersMessage, OffersMessageHandler, OnionMessageContents, PendingOnionMessage};
+use crate::onion_message::messenger::{CustomOnionMessageHandler, PendingOnionMessage};
+use crate::onion_message::offers::{OffersMessage, OffersMessageHandler};
+use crate::onion_message::packet::OnionMessageContents;
use crate::routing::gossip::{NodeId, NodeAlias};
use crate::util::atomic_counter::AtomicCounter;
use crate::util::logger::{Logger, WithContext};
use std::error;
#[cfg(not(c_bindings))]
use {
+ crate::ln::channelmanager::{SimpleArcChannelManager, SimpleRefChannelManager},
+ crate::onion_message::messenger::{SimpleArcOnionMessenger, SimpleRefOnionMessenger},
crate::routing::gossip::{NetworkGraph, P2PGossipSync},
crate::sign::KeysManager,
crate::sync::Arc,
features.set_channel_type_optional();
features.set_scid_privacy_optional();
features.set_zero_conf_optional();
+ features.set_route_blinding_optional();
features
}
/// A message handler which handles onion messages. This should generally be an
/// [`OnionMessenger`], but can also be an [`IgnoringMessageHandler`].
///
- /// [`OnionMessenger`]: crate::onion_message::OnionMessenger
+ /// [`OnionMessenger`]: crate::onion_message::messenger::OnionMessenger
pub onion_message_handler: OM,
/// A message handler which handles custom messages. The only LDK-provided implementation is
/// Append a message to a peer's pending outbound/write buffer
fn enqueue_message<M: wire::Type>(&self, peer: &mut Peer, message: &M) {
- let logger = WithContext::from(&self.logger, Some(peer.their_node_id.unwrap().0), None);
+ let logger = WithContext::from(&self.logger, peer.their_node_id.map(|p| p.0), None);
if is_gossip_msg(message.type_id()) {
log_gossip!(logger, "Enqueueing message {:?} to {}", message, log_pubkey!(peer.their_node_id.unwrap().0));
} else {
macro_rules! insert_node_id {
() => {
- let logger = WithContext::from(&self.logger, Some(peer.their_node_id.unwrap().0), None);
+ let logger = WithContext::from(&self.logger, peer.their_node_id.map(|p| p.0), None);
match self.node_id_to_descriptor.lock().unwrap().entry(peer.their_node_id.unwrap().0) {
hash_map::Entry::Occupied(e) => {
log_trace!(logger, "Got second connection with {}, closing", log_pubkey!(peer.their_node_id.unwrap().0));
peer.pending_read_buffer.resize(18, 0);
peer.pending_read_is_header = true;
- let logger = WithContext::from(&self.logger, Some(peer.their_node_id.unwrap().0), None);
+ let logger = WithContext::from(&self.logger, peer.their_node_id.map(|p| p.0), None);
let message = match message_result {
Ok(x) => x,
Err(e) => {
}
if let wire::Message::GossipTimestampFilter(_msg) = message {
- // When supporting gossip messages, start inital gossip sync only after we receive
+ // When supporting gossip messages, start initial gossip sync only after we receive
// a GossipTimestampFilter
if peer_lock.their_features.as_ref().unwrap().supports_gossip_queries() &&
!peer_lock.sent_gossip_timestamp_filter {
for (_, peer_mutex) in peers.iter() {
let mut peer = peer_mutex.lock().unwrap();
- let logger = WithContext::from(&self.logger, Some(peer.their_node_id.unwrap().0), None);
if !peer.handshake_complete() ||
!peer.should_forward_channel_announcement(msg.contents.short_channel_id) {
continue
}
debug_assert!(peer.their_node_id.is_some());
debug_assert!(peer.channel_encryptor.is_ready_for_encryption());
+ let logger = WithContext::from(&self.logger, peer.their_node_id.map(|p| p.0), None);
if peer.buffer_full_drop_gossip_broadcast() {
log_gossip!(logger, "Skipping broadcast message to {:?} as its outbound buffer is full", peer.their_node_id);
continue;
for (_, peer_mutex) in peers.iter() {
let mut peer = peer_mutex.lock().unwrap();
- let logger = WithContext::from(&self.logger, Some(peer.their_node_id.unwrap().0), None);
if !peer.handshake_complete() ||
!peer.should_forward_node_announcement(msg.contents.node_id) {
continue
}
debug_assert!(peer.their_node_id.is_some());
debug_assert!(peer.channel_encryptor.is_ready_for_encryption());
+ let logger = WithContext::from(&self.logger, peer.their_node_id.map(|p| p.0), None);
if peer.buffer_full_drop_gossip_broadcast() {
log_gossip!(logger, "Skipping broadcast message to {:?} as its outbound buffer is full", peer.their_node_id);
continue;
for (_, peer_mutex) in peers.iter() {
let mut peer = peer_mutex.lock().unwrap();
- let logger = WithContext::from(&self.logger, Some(peer.their_node_id.unwrap().0), None);
if !peer.handshake_complete() ||
!peer.should_forward_channel_announcement(msg.contents.short_channel_id) {
continue
}
debug_assert!(peer.their_node_id.is_some());
debug_assert!(peer.channel_encryptor.is_ready_for_encryption());
+ let logger = WithContext::from(&self.logger, peer.their_node_id.map(|p| p.0), None);
if peer.buffer_full_drop_gossip_broadcast() {
log_gossip!(logger, "Skipping broadcast message to {:?} as its outbound buffer is full", peer.their_node_id);
continue;
log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.temporary_channel_id)), "Handling SendFundingCreated event in peer_handler for node {} for channel {} (which becomes {})",
log_pubkey!(node_id),
&msg.temporary_channel_id,
- log_funding_channel_id!(msg.funding_txid, msg.funding_output_index));
+ DebugFundingChannelId(&msg.funding_txid, msg.funding_output_index));
// TODO: If the peer is gone we should generate a DiscardFunding event
// indicating to the wallet that they should just throw away this funding transaction
self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
log_pubkey!(node_id));
}
// We do not have the peers write lock, so we just store that we're
- // about to disconenct the peer and do it after we finish
+ // about to disconnect the peer and do it after we finish
// processing most messages.
let msg = msg.map(|msg| wire::Message::<<<CMH as core::ops::Deref>::Target as wire::CustomMessageReader>::CustomMessage>::Error(msg));
peers_to_disconnect.insert(node_id, msg);
log_trace!(logger, "Handling DisconnectPeer HandleError event in peer_handler for node {} with message {}",
log_pubkey!(node_id), msg.data);
// We do not have the peers write lock, so we just store that we're
- // about to disconenct the peer and do it after we finish
+ // about to disconnect the peer and do it after we finish
// processing most messages.
peers_to_disconnect.insert(node_id, Some(wire::Message::Warning(msg)));
},
use crate::routing::gossip::RoutingFees;
use crate::routing::router::{PaymentParameters, RouteHint, RouteHintHop};
use crate::ln::features::ChannelTypeFeatures;
-use crate::ln::msgs;
+use crate::ln::{msgs, ChannelId};
use crate::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, ChannelUpdate, ErrorAction};
use crate::ln::wire::Encode;
use crate::util::config::{UserConfig, MaxDustHTLCExposure};
check_added_monitors!(nodes[1], 1);
assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
- let channel_id = funding_output.to_channel_id();
+ let channel_id = ChannelId::v1_from_funding_outpoint(funding_output);
nodes[1].chain_monitor.complete_sole_pending_chan_update(&channel_id);
expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
use crate::chain::transaction::OutPoint;
use crate::events::{ClosureReason, Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider};
use crate::ln::channelmanager::{ChannelManager, ChannelManagerReadArgs, PaymentId, RecipientOnionFields};
-use crate::ln::msgs;
+use crate::ln::{msgs, ChannelId};
use crate::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, ErrorAction};
use crate::util::test_channel_signer::TestChannelSigner;
use crate::util::test_utils;
nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
let chan_0_monitor_serialized =
- get_monitor!(nodes[0], OutPoint { txid: tx.txid(), index: 0 }.to_channel_id()).encode();
+ get_monitor!(nodes[0], ChannelId::v1_from_funding_outpoint(OutPoint { txid: tx.txid(), index: 0 })).encode();
reload_node!(nodes[0], nodes[0].node.encode(), &[&chan_0_monitor_serialized], persister, new_chain_monitor, nodes_0_deserialized);
nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
assert!(nodes_0_read.is_empty());
for monitor in node_0_monitors.drain(..) {
- assert_eq!(nodes[0].chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor),
+ let funding_outpoint = monitor.get_funding_txo().0;
+ assert_eq!(nodes[0].chain_monitor.watch_channel(funding_outpoint, monitor),
Ok(ChannelMonitorUpdateStatus::Completed));
check_added_monitors!(nodes[0], 1);
}
// monitors and ChannelManager, for use later, if we don't want to persist both monitors.
let mut original_monitor = test_utils::TestVecWriter(Vec::new());
if !persist_both_monitors {
- for outpoint in nodes[3].chain_monitor.chain_monitor.list_monitors() {
- if outpoint.to_channel_id() == chan_id_not_persisted {
+ for (outpoint, channel_id) in nodes[3].chain_monitor.chain_monitor.list_monitors() {
+ if channel_id == chan_id_not_persisted {
assert!(original_monitor.0.is_empty());
nodes[3].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap().write(&mut original_monitor).unwrap();
}
// crashed in between the two persistence calls - using one old ChannelMonitor and one new one,
// with the old ChannelManager.
let mut updated_monitor = test_utils::TestVecWriter(Vec::new());
- for outpoint in nodes[3].chain_monitor.chain_monitor.list_monitors() {
- if outpoint.to_channel_id() == chan_id_persisted {
+ for (outpoint, channel_id) in nodes[3].chain_monitor.chain_monitor.list_monitors() {
+ if channel_id == chan_id_persisted {
assert!(updated_monitor.0.is_empty());
nodes[3].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap().write(&mut updated_monitor).unwrap();
}
}
// If `persist_both_monitors` is set, get the second monitor here as well
if persist_both_monitors {
- for outpoint in nodes[3].chain_monitor.chain_monitor.list_monitors() {
- if outpoint.to_channel_id() == chan_id_not_persisted {
+ for (outpoint, channel_id) in nodes[3].chain_monitor.chain_monitor.list_monitors() {
+ if channel_id == chan_id_not_persisted {
assert!(original_monitor.0.is_empty());
nodes[3].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap().write(&mut original_monitor).unwrap();
}
assert_eq!(nodes[0].tx_broadcaster.txn_broadcast().len(), 0);
// Reload the node while a subset of the channels in the funding batch have persisted monitors.
- let channel_id_1 = OutPoint { txid: tx.txid(), index: 0 }.to_channel_id();
+ let channel_id_1 = ChannelId::v1_from_funding_outpoint(OutPoint { txid: tx.txid(), index: 0 });
let node_encoded = nodes[0].node.encode();
let channel_monitor_1_serialized = get_monitor!(nodes[0], channel_id_1).encode();
reload_node!(nodes[0], node_encoded, &[&channel_monitor_1_serialized], new_persister, new_chain_monitor, new_channel_manager);
use crate::prelude::*;
-use crate::ln::functional_test_utils::*;
+use crate::ln::{functional_test_utils::*, ChannelId};
fn do_test_onchain_htlc_reorg(local_commitment: bool, claim: bool) {
// Our on-chain HTLC-claim learning has a few properties worth testing:
let (_, _, chan_id, funding_tx) =
create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 100_000_000);
let funding_outpoint = OutPoint { txid: funding_tx.txid(), index: 0 };
- assert_eq!(funding_outpoint.to_channel_id(), chan_id);
+ assert_eq!(ChannelId::v1_from_funding_outpoint(funding_outpoint), chan_id);
let remote_txn_a = get_local_commitment_txn!(nodes[0], chan_id);
let remote_txn_b = get_local_commitment_txn!(nodes[1], chan_id);
// You may not use this file except in accordance with one or both of these
// licenses.
-//! Tests of our shutdown and closing_signed negotiation logic.
+//! Tests of our shutdown and closing_signed negotiation logic as well as some assorted force-close
+//! handling tests.
use crate::sign::{EntropySource, SignerProvider};
use crate::chain::ChannelMonitorUpdateStatus;
use crate::chain::transaction::OutPoint;
-use crate::events::{MessageSendEvent, HTLCDestination, MessageSendEventsProvider, ClosureReason};
+use crate::events::{Event, MessageSendEvent, HTLCDestination, MessageSendEventsProvider, ClosureReason};
use crate::ln::channelmanager::{self, PaymentSendFailure, PaymentId, RecipientOnionFields, Retry, ChannelShutdownState, ChannelDetails};
use crate::routing::router::{PaymentParameters, get_route, RouteParameters};
-use crate::ln::msgs;
+use crate::ln::{ChannelId, msgs};
use crate::ln::msgs::{ChannelMessageHandler, ErrorAction};
use crate::ln::onion_utils::INVALID_ONION_BLINDING;
use crate::ln::script::ShutdownScript;
use crate::util::config::UserConfig;
use crate::util::string::UntrustedString;
+use bitcoin::{Transaction, TxOut};
+use bitcoin::blockdata::locktime::absolute::LockTime;
use bitcoin::blockdata::script::Builder;
use bitcoin::blockdata::opcodes;
use bitcoin::network::constants::Network;
mine_transaction(&nodes[0], &tx);
mine_transaction(&nodes[1], &tx);
- nodes[0].node.close_channel(&OutPoint { txid: tx.txid(), index: 0 }.to_channel_id(), &nodes[1].node.get_our_node_id()).unwrap();
+ nodes[0].node.close_channel(&ChannelId::v1_from_funding_outpoint(OutPoint { txid: tx.txid(), index: 0 }), &nodes[1].node.get_our_node_id()).unwrap();
let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_shutdown);
let node_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
// We test that in case of peer committing upfront to a script, if it changes at closing, we refuse to sign
let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 1000000, 1000000);
- nodes[0].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id(), &nodes[2].node.get_our_node_id()).unwrap();
+ nodes[0].node.close_channel(&chan.2, &nodes[2].node.get_our_node_id()).unwrap();
let node_0_orig_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[2].node.get_our_node_id());
let mut node_0_shutdown = node_0_orig_shutdown.clone();
node_0_shutdown.scriptpubkey = Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script().to_p2sh();
// We test that in case of peer committing upfront to a script, if it doesn't change at closing, we sign
let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 1000000, 1000000);
- nodes[0].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id(), &nodes[2].node.get_our_node_id()).unwrap();
+ nodes[0].node.close_channel(&chan.2, &nodes[2].node.get_our_node_id()).unwrap();
let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[2].node.get_our_node_id());
// We test that in case of peer committing upfront to a script, if it oesn't change at closing, we sign
nodes[2].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_shutdown);
// We test that if case of peer non-signaling we don't enforce committed script at channel opening
*nodes[0].override_init_features.borrow_mut() = Some(nodes[0].node.init_features().clear_upfront_shutdown_script());
let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000);
- nodes[0].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id(), &nodes[1].node.get_our_node_id()).unwrap();
+ nodes[0].node.close_channel(&chan.2, &nodes[1].node.get_our_node_id()).unwrap();
let node_1_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_1_shutdown);
check_added_monitors!(nodes[1], 1);
// channel smoothly, opt-out is from channel initiator here
*nodes[0].override_init_features.borrow_mut() = None;
let chan = create_announced_chan_between_nodes_with_value(&nodes, 1, 0, 1000000, 1000000);
- nodes[1].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id(), &nodes[0].node.get_our_node_id()).unwrap();
+ nodes[1].node.close_channel(&chan.2, &nodes[0].node.get_our_node_id()).unwrap();
check_added_monitors!(nodes[1], 1);
let node_0_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_0_shutdown);
//// We test that if user opt-out, we provide a zero-length script at channel opening and we are able to close
//// channel smoothly
let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000);
- nodes[1].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id(), &nodes[0].node.get_our_node_id()).unwrap();
+ nodes[1].node.close_channel(&chan.2, &nodes[0].node.get_our_node_id()).unwrap();
check_added_monitors!(nodes[1], 1);
let node_0_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_0_shutdown);
let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
- nodes[1].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id(), &nodes[0].node.get_our_node_id()).unwrap();
+ nodes[1].node.close_channel(&chan.2, &nodes[0].node.get_our_node_id()).unwrap();
check_added_monitors!(nodes[1], 1);
// Use a segwit v0 script supported even without option_shutdown_anysegwit
let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
- nodes[1].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id(), &nodes[0].node.get_our_node_id()).unwrap();
+ nodes[1].node.close_channel(&chan.2, &nodes[0].node.get_our_node_id()).unwrap();
check_added_monitors!(nodes[1], 1);
// Use a non-v0 segwit script supported by option_shutdown_anysegwit
.expect(OnGetShutdownScriptpubkey { returns: supported_shutdown_script });
let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
- match nodes[1].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id(), &nodes[0].node.get_our_node_id()) {
+ match nodes[1].node.close_channel(&chan.2, &nodes[0].node.get_our_node_id()) {
Err(APIError::IncompatibleShutdownScript { script }) => {
assert_eq!(script.into_inner(), unsupported_shutdown_script.clone().into_inner());
},
Err(e) => panic!("Unexpected error: {:?}", e),
Ok(_) => panic!("Expected error"),
}
- nodes[1].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id(), &nodes[0].node.get_our_node_id()).unwrap();
+ nodes[1].node.close_channel(&chan.2, &nodes[0].node.get_our_node_id()).unwrap();
check_added_monitors!(nodes[1], 1);
// Use a non-v0 segwit script unsupported without option_shutdown_anysegwit
let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
- nodes[1].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id(), &nodes[0].node.get_our_node_id()).unwrap();
+ nodes[1].node.close_channel(&chan.2, &nodes[0].node.get_our_node_id()).unwrap();
check_added_monitors!(nodes[1], 1);
// Use a segwit v0 script with an unsupported witness program
let shutdown_script = ShutdownScript::try_from(script.clone()).unwrap();
let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
- nodes[1].node.close_channel_with_feerate_and_script(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id(), &nodes[0].node.get_our_node_id(), None, Some(shutdown_script)).unwrap();
+ nodes[1].node.close_channel_with_feerate_and_script(&chan.2, &nodes[0].node.get_our_node_id(), None, Some(shutdown_script)).unwrap();
check_added_monitors!(nodes[1], 1);
let mut node_0_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
let shutdown_script = ShutdownScript::try_from(script).unwrap();
let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
- let result = nodes[1].node.close_channel_with_feerate_and_script(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id(), &nodes[0].node.get_our_node_id(), None, Some(shutdown_script));
+ let result = nodes[1].node.close_channel_with_feerate_and_script(&chan.2, &nodes[0].node.get_our_node_id(), None, Some(shutdown_script));
assert_eq!(result, Err(APIError::APIMisuseError { err: "Cannot override shutdown script for a channel with one already set".to_string() }));
}
*feerate_lock *= 10;
}
- nodes[0].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id(), &nodes[1].node.get_our_node_id()).unwrap();
+ nodes[0].node.close_channel(&chan.2, &nodes[1].node.get_our_node_id()).unwrap();
let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_shutdown);
let node_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
- let chan_id = OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id();
+ let chan_id = chan.2;
nodes[0].node.close_channel_with_feerate_and_script(&chan_id, &nodes[1].node.get_our_node_id(), Some(253 * 10), None).unwrap();
let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
do_outbound_update_no_early_closing_signed(true);
do_outbound_update_no_early_closing_signed(false);
}
+
+#[test]
+fn batch_funding_failure() {
+ // Provides test coverage of batch funding failure, which previously deadlocked
+ let chanmon_cfgs = create_chanmon_cfgs(4);
+ let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
+ let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
+
+ exchange_open_accept_chan(&nodes[0], &nodes[1], 1_000_000, 0);
+ exchange_open_accept_chan(&nodes[0], &nodes[2], 1_000_000, 0);
+
+ let events = nodes[0].node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 2);
+ // Build a transaction which only has the output for one of the two channels we're trying to
+ // confirm. Previously this led to a deadlock in channel closure handling.
+ let mut tx = Transaction { version: 2, lock_time: LockTime::ZERO, input: Vec::new(), output: Vec::new() };
+ let mut chans = Vec::new();
+ for (idx, ev) in events.iter().enumerate() {
+ if let Event::FundingGenerationReady { temporary_channel_id, counterparty_node_id, output_script, .. } = ev {
+ if idx == 0 {
+ tx.output.push(TxOut { value: 1_000_000, script_pubkey: output_script.clone() });
+ }
+ chans.push((temporary_channel_id, counterparty_node_id));
+ } else { panic!(); }
+ }
+
+ // We should probably end up with an error for both channels, but currently we don't generate
+ // an error for the failing channel itself.
+ let err = "Error in transaction funding: Misuse error: No output matched the script_pubkey and value in the FundingGenerationReady event".to_string();
+ let close = [ExpectedCloseEvent::from_id_reason(ChannelId::v1_from_funding_txid(tx.txid().as_ref(), 0), true, ClosureReason::ProcessingError { err })];
+
+ nodes[0].node.batch_funding_transaction_generated(&chans, tx).unwrap_err();
+
+ get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
+ check_closed_events(&nodes[0], &close);
+ assert_eq!(nodes[0].node.list_channels().len(), 0);
+}
self
}
+ pub(crate) fn clear_paths(mut self) -> Self {
+ self.offer.paths = None;
+ self
+ }
+
pub(super) fn build_unchecked(self) -> Offer {
self.build_without_checks()
}
#[cfg(test)]
impl<'a, T: secp256k1::Signing> RefundBuilder<'a, T> {
+ pub(crate) fn clear_paths(mut self) -> Self {
+ self.refund.paths = None;
+ self
+ }
+
fn features_unchecked(mut self, features: InvoiceRequestFeatures) -> Self {
self.refund.features = features;
self
use crate::sign::{EntropySource, NodeSigner, Recipient};
use crate::util::ser::{FixedLengthReader, LengthReadable, Writeable, Writer};
use crate::util::test_utils;
-use super::{CustomOnionMessageHandler, Destination, MessageRouter, OffersMessage, OffersMessageHandler, OnionMessageContents, OnionMessagePath, OnionMessenger, PendingOnionMessage, SendError};
+use super::messenger::{CustomOnionMessageHandler, Destination, MessageRouter, OnionMessagePath, OnionMessenger, PendingOnionMessage, SendError};
+use super::offers::{OffersMessage, OffersMessageHandler};
+use super::packet::{OnionMessageContents, Packet};
use bitcoin::network::constants::Network;
use bitcoin::hashes::hex::FromHex;
let sender_to_alice_packet_bytes_len = sender_to_alice_packet_bytes.len() as u64;
let mut reader = io::Cursor::new(sender_to_alice_packet_bytes);
let mut packet_reader = FixedLengthReader::new(&mut reader, sender_to_alice_packet_bytes_len);
- let sender_to_alice_packet: super::Packet =
- <super::Packet as LengthReadable>::read(&mut packet_reader).unwrap();
+ let sender_to_alice_packet: Packet =
+ <Packet as LengthReadable>::read(&mut packet_reader).unwrap();
let secp_ctx = Secp256k1::new();
let sender_to_alice_om = msgs::OnionMessage {
blinding_point: PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&<Vec<u8>>::from_hex("6363636363636363636363636363636363636363636363636363636363636363").unwrap()).unwrap()),
// You may not use this file except in accordance with one or both of these
// licenses.
-//! LDK sends, receives, and forwards onion messages via the [`OnionMessenger`]. See its docs for
-//! more information.
+//! LDK sends, receives, and forwards onion messages via this [`OnionMessenger`], which lives here,
+//! as well as various types, traits, and utilities that it uses.
use bitcoin::hashes::{Hash, HashEngine};
use bitcoin::hashes::hmac::{Hmac, HmacEngine};
use crate::blinded_path::utils;
use crate::events::{Event, EventHandler, EventsProvider};
use crate::sign::{EntropySource, NodeSigner, Recipient};
-#[cfg(not(c_bindings))]
-use crate::ln::channelmanager::{SimpleArcChannelManager, SimpleRefChannelManager};
use crate::ln::features::{InitFeatures, NodeFeatures};
use crate::ln::msgs::{self, OnionMessage, OnionMessageHandler, SocketAddress};
use crate::ln::onion_utils;
use crate::routing::gossip::{NetworkGraph, NodeId};
-pub use super::packet::OnionMessageContents;
+use super::packet::OnionMessageContents;
use super::packet::ParsedOnionMessageContents;
use super::offers::OffersMessageHandler;
use super::packet::{BIG_PACKET_HOP_DATA_LEN, ForwardControlTlvs, Packet, Payload, ReceiveControlTlvs, SMALL_PACKET_HOP_DATA_LEN};
#[cfg(not(c_bindings))]
use {
crate::sign::KeysManager,
+ crate::ln::channelmanager::{SimpleArcChannelManager, SimpleRefChannelManager},
crate::ln::peer_handler::IgnoringMessageHandler,
crate::sync::Arc,
};
/// # use lightning::blinded_path::BlindedPath;
/// # use lightning::sign::{EntropySource, KeysManager};
/// # use lightning::ln::peer_handler::IgnoringMessageHandler;
-/// # use lightning::onion_message::{OnionMessageContents, Destination, MessageRouter, OnionMessagePath, OnionMessenger};
+/// # use lightning::onion_message::messenger::{Destination, MessageRouter, OnionMessagePath, OnionMessenger};
+/// # use lightning::onion_message::packet::OnionMessageContents;
/// # use lightning::util::logger::{Logger, Record};
/// # use lightning::util::ser::{Writeable, Writer};
/// # use lightning::io;
}
}
+ #[cfg(test)]
+ pub(crate) fn set_offers_handler(&mut self, offers_handler: OMH) {
+ self.offers_handler = offers_handler;
+ }
+
/// Sends an [`OnionMessage`] with the given `contents` to `destination`.
///
/// See [`OnionMessenger`] for example usage.
self.enqueue_onion_message(path, contents, reply_path, format_args!(""))
}
+ pub(crate) fn peel_onion_message(
+ &self, msg: &OnionMessage
+ ) -> Result<PeeledOnion<<<CMH>::Target as CustomOnionMessageHandler>::CustomMessage>, ()> {
+ peel_onion_message(
+ msg, &self.secp_ctx, &*self.node_signer, &*self.logger, &*self.custom_handler
+ )
+ }
+
fn handle_onion_message_response<T: OnionMessageContents>(
&self, response: Option<T>, reply_path: Option<BlindedPath>, log_suffix: fmt::Arguments
) {
CMH::Target: CustomOnionMessageHandler,
{
fn handle_onion_message(&self, _peer_node_id: &PublicKey, msg: &OnionMessage) {
- match peel_onion_message(
- msg, &self.secp_ctx, &*self.node_signer, &*self.logger, &*self.custom_handler
- ) {
+ match self.peel_onion_message(msg) {
Ok(PeeledOnion::Receive(message, path_id, reply_path)) => {
log_trace!(
self.logger,
//!
//! [offers]: <https://github.com/lightning/bolts/pull/798>
//! [blinded paths]: crate::blinded_path::BlindedPath
+//! [`OnionMessenger`]: self::messenger::OnionMessenger
-mod messenger;
-mod offers;
-mod packet;
+pub mod messenger;
+pub mod offers;
+pub mod packet;
#[cfg(test)]
mod functional_tests;
-
-// Re-export structs so they can be imported with just the `onion_message::` module prefix.
-pub use self::messenger::{CustomOnionMessageHandler, DefaultMessageRouter, Destination, MessageRouter, OnionMessageContents, OnionMessagePath, OnionMessenger, PeeledOnion, PendingOnionMessage, SendError};
-pub use self::messenger::{create_onion_message, peel_onion_message};
-#[cfg(not(c_bindings))]
-pub use self::messenger::{SimpleArcOnionMessenger, SimpleRefOnionMessenger};
-pub use self::offers::{OffersMessage, OffersMessageHandler};
-pub use self::packet::{Packet, ParsedOnionMessageContents};
-pub(crate) use self::packet::ControlTlvs;
-pub(crate) use self::messenger::new_pending_onion_message;
use crate::offers::invoice_request::InvoiceRequest;
use crate::offers::invoice::Bolt12Invoice;
use crate::offers::parse::Bolt12ParseError;
-use crate::onion_message::OnionMessageContents;
+use crate::onion_message::packet::OnionMessageContents;
use crate::util::logger::Logger;
use crate::util::ser::{Readable, ReadableArgs, Writeable, Writer};
#[cfg(not(c_bindings))]
///
/// The returned [`OffersMessage`], if any, is enqueued to be sent by [`OnionMessenger`].
///
- /// [`OnionMessenger`]: crate::onion_message::OnionMessenger
+ /// [`OnionMessenger`]: crate::onion_message::messenger::OnionMessenger
fn handle_message(&self, message: OffersMessage) -> Option<OffersMessage>;
/// Releases any [`OffersMessage`]s that need to be sent.
/// Typically, this is used for messages initiating a payment flow rather than in response to
/// another message. The latter should use the return value of [`Self::handle_message`].
#[cfg(c_bindings)]
- fn release_pending_messages(&self) -> Vec<(OffersMessage, crate::onion_message::Destination, Option<crate::blinded_path::BlindedPath>)> { vec![] }
+ fn release_pending_messages(&self) -> Vec<(OffersMessage, crate::onion_message::messenger::Destination, Option<crate::blinded_path::BlindedPath>)> { vec![] }
}
/// Possible BOLT 12 Offers messages sent and received via an [`OnionMessage`].
use crate::ln::onion_utils;
use super::messenger::CustomOnionMessageHandler;
use super::offers::OffersMessage;
-use crate::util::chacha20poly1305rfc::{ChaChaPolyReadAdapter, ChaChaPolyWriteAdapter};
+use crate::crypto::streams::{ChaChaPolyReadAdapter, ChaChaPolyWriteAdapter};
use crate::util::logger::Logger;
use crate::util::ser::{BigSize, FixedLengthReader, LengthRead, LengthReadable, LengthReadableArgs, Readable, ReadableArgs, Writeable, Writer};
// NOTE: In the case of no-std, we won't have access to the current UNIX time at the time of removal,
// so we'll just set the removal time here to the current UNIX time on the very next invocation
// of this function.
- #[cfg(feature = "no-std")]
+ #[cfg(not(feature = "std"))]
{
let mut tracked_time = Some(current_time_unix);
core::mem::swap(time, &mut tracked_time);
use crate::ln::features::{BlindedHopFeatures, Bolt11InvoiceFeatures, Bolt12InvoiceFeatures, ChannelFeatures, NodeFeatures};
use crate::ln::msgs::{DecodeError, ErrorAction, LightningError, MAX_VALUE_MSAT};
use crate::offers::invoice::{BlindedPayInfo, Bolt12Invoice};
-use crate::onion_message::{DefaultMessageRouter, Destination, MessageRouter, OnionMessagePath};
+use crate::onion_message::messenger::{DefaultMessageRouter, Destination, MessageRouter, OnionMessagePath};
use crate::routing::gossip::{DirectedChannelInfo, EffectiveCapacity, ReadOnlyNetworkGraph, NetworkGraph, NodeId, RoutingFees};
use crate::routing::scoring::{ChannelUsage, LockableScore, ScoreLookUp};
use crate::sign::EntropySource;
use crate::util::ser::{Writeable, Readable, ReadableArgs, Writer};
use crate::util::logger::{Level, Logger};
-use crate::util::chacha20::ChaCha20;
+use crate::crypto::chacha20::ChaCha20;
use crate::io;
use crate::prelude::*;
None => return None,
};
let payment_relay: PaymentRelay = match details.counterparty.forwarding_info {
- Some(forwarding_info) => forwarding_info.into(),
+ Some(forwarding_info) => match forwarding_info.try_into() {
+ Ok(payment_relay) => payment_relay,
+ Err(()) => return None,
+ },
None => return None,
};
- // Avoid exposing esoteric CLTV expiry deltas
- let cltv_expiry_delta = match payment_relay.cltv_expiry_delta {
- 0..=40 => 40u32,
- 41..=80 => 80u32,
- 81..=144 => 144u32,
- 145..=216 => 216u32,
- _ => return None,
- };
-
+ let cltv_expiry_delta = payment_relay.cltv_expiry_delta as u32;
let payment_constraints = PaymentConstraints {
max_cltv_expiry: tlvs.payment_constraints.max_cltv_expiry + cltv_expiry_delta,
htlc_minimum_msat: details.inbound_htlc_minimum_msat.unwrap_or(0),
/// payment to fail. Future attempts for the same payment shouldn't be relayed through any of
/// these SCIDs.
pub previously_failed_channels: Vec<u64>,
+
+ /// A list of indices corresponding to blinded paths in [`Payee::Blinded::route_hints`] which this
+ /// payment was previously attempted over and which caused the payment to fail. Future attempts
+ /// for the same payment shouldn't be relayed through any of these blinded paths.
+ pub previously_failed_blinded_path_idxs: Vec<u64>,
}
impl Writeable for PaymentParameters {
(7, self.previously_failed_channels, required_vec),
(8, *blinded_hints, optional_vec),
(9, self.payee.final_cltv_expiry_delta(), option),
+ (11, self.previously_failed_blinded_path_idxs, required_vec),
});
Ok(())
}
(7, previously_failed_channels, optional_vec),
(8, blinded_route_hints, optional_vec),
(9, final_cltv_expiry_delta, (default_value, default_final_cltv_expiry_delta)),
+ (11, previously_failed_blinded_path_idxs, optional_vec),
});
let blinded_route_hints = blinded_route_hints.unwrap_or(vec![]);
let payee = if blinded_route_hints.len() != 0 {
max_channel_saturation_power_of_half: _init_tlv_based_struct_field!(max_channel_saturation_power_of_half, (default_value, unused)),
expiry_time,
previously_failed_channels: previously_failed_channels.unwrap_or(Vec::new()),
+ previously_failed_blinded_path_idxs: previously_failed_blinded_path_idxs.unwrap_or(Vec::new()),
})
}
}
max_path_count: DEFAULT_MAX_PATH_COUNT,
max_channel_saturation_power_of_half: DEFAULT_MAX_CHANNEL_SATURATION_POW_HALF,
previously_failed_channels: Vec::new(),
+ previously_failed_blinded_path_idxs: Vec::new(),
}
}
max_path_count: DEFAULT_MAX_PATH_COUNT,
max_channel_saturation_power_of_half: DEFAULT_MAX_CHANNEL_SATURATION_POW_HALF,
previously_failed_channels: Vec::new(),
+ previously_failed_blinded_path_idxs: Vec::new(),
}
}
pub fn with_max_channel_saturation_power_of_half(self, max_channel_saturation_power_of_half: u8) -> Self {
Self { max_channel_saturation_power_of_half, ..self }
}
+
+ pub(crate) fn insert_previously_failed_blinded_path(&mut self, failed_blinded_tail: &BlindedTail) {
+ let mut found_blinded_tail = false;
+ for (idx, (_, path)) in self.payee.blinded_route_hints().iter().enumerate() {
+ if failed_blinded_tail.hops == path.blinded_hops &&
+ failed_blinded_tail.blinding_point == path.blinding_point
+ {
+ self.previously_failed_blinded_path_idxs.push(idx as u64);
+ found_blinded_tail = true;
+ }
+ }
+ debug_assert!(found_blinded_tail);
+ }
}
/// The recipient of a payment, differing based on whether they've hidden their identity with route
_ => None,
}
}
+ fn blinded_hint_idx(&self) -> Option<usize> {
+ match self {
+ Self::Blinded(BlindedPathCandidate { hint_idx, .. }) |
+ Self::OneHopBlinded(OneHopBlindedPathCandidate { hint_idx, .. }) => {
+ Some(*hint_idx)
+ },
+ _ => None,
+ }
+ }
/// Returns the source node id of current hop.
///
/// Source node id refers to the node forwarding the HTLC through this hop.
(amount_to_transfer_over_msat < $next_hops_path_htlc_minimum_msat &&
recommended_value_msat >= $next_hops_path_htlc_minimum_msat));
- let payment_failed_on_this_channel = scid_opt.map_or(false,
- |scid| payment_params.previously_failed_channels.contains(&scid));
+ let payment_failed_on_this_channel = match scid_opt {
+ Some(scid) => payment_params.previously_failed_channels.contains(&scid),
+ None => match $candidate.blinded_hint_idx() {
+ Some(idx) => {
+ payment_params.previously_failed_blinded_path_idxs.contains(&(idx as u64))
+ },
+ None => false,
+ },
+ };
let (should_log_candidate, first_hop_details) = match $candidate {
CandidateRouteHop::FirstHop(hop) => (true, Some(hop.details)),
}
}
- // Means we succesfully traversed from the payer to the payee, now
+ // Means we successfully traversed from the payer to the payee, now
// save this path for the payment route. Also, update the liquidity
// remaining on the used hops, so that we take them into account
// while looking for more paths.
use crate::offers::invoice::BlindedPayInfo;
use crate::util::config::UserConfig;
use crate::util::test_utils as ln_test_utils;
- use crate::util::chacha20::ChaCha20;
+ use crate::crypto::chacha20::ChaCha20;
use crate::util::ser::{Readable, Writeable};
#[cfg(c_bindings)]
use crate::util::ser::Writer;
(route.paths[1].hops[1].short_channel_id == 4 && route.paths[0].hops[1].short_channel_id == 13));
}
- #[cfg(not(feature = "no-std"))]
+ #[cfg(feature = "std")]
pub(super) fn random_init_seed() -> u64 {
// Because the default HashMap in std pulls OS randomness, we can use it as a (bad) RNG.
use core::hash::{BuildHasher, Hasher};
}
#[test]
- #[cfg(not(feature = "no-std"))]
+ #[cfg(feature = "std")]
fn generate_routes() {
use crate::routing::scoring::{ProbabilisticScorer, ProbabilisticScoringFeeParameters};
}
#[test]
- #[cfg(not(feature = "no-std"))]
+ #[cfg(feature = "std")]
fn generate_routes_mpp() {
use crate::routing::scoring::{ProbabilisticScorer, ProbabilisticScoringFeeParameters};
}
#[test]
- #[cfg(not(feature = "no-std"))]
+ #[cfg(feature = "std")]
fn generate_large_mpp_routes() {
use crate::routing::scoring::{ProbabilisticScorer, ProbabilisticScoringFeeParameters};
fn do_min_htlc_overpay_violates_max_htlc(blinded_payee: bool) {
// Test that if overpaying to meet a later hop's min_htlc and causes us to violate an earlier
// hop's max_htlc, we don't consider that candidate hop valid. Previously we would add this hop
- // to `targets` and build an invalid path with it, and subsquently hit a debug panic asserting
+ // to `targets` and build an invalid path with it, and subsequently hit a debug panic asserting
// that the used liquidity for a hop was less than its available liquidity limit.
let secp_ctx = Secp256k1::new();
let logger = Arc::new(ln_test_utils::TestLogger::new());
}
}
-#[cfg(all(any(test, ldk_bench), not(feature = "no-std")))]
+#[cfg(all(any(test, ldk_bench), feature = "std"))]
pub(crate) mod bench_utils {
use super::*;
use std::fs::File;
}
break;
}
- // If we couldn't find a path with a higer amount, reduce and try again.
+ // If we couldn't find a path with a higher amount, reduce and try again.
score_amt /= 100;
}
}
}
-#[cfg(not(c_bindings))]
+#[cfg(any(not(c_bindings), feature = "_test_utils", test))]
impl<'a, T: Score + 'a> LockableScore<'a> for RwLock<T> {
type ScoreUpdate = T;
type ScoreLookUp = T;
use bitcoin::{secp256k1, Sequence, Witness, Txid};
use crate::util::transaction_utils;
-use crate::util::crypto::{hkdf_extract_expand_twice, sign, sign_with_aux_rand};
+use crate::crypto::utils::{hkdf_extract_expand_twice, sign, sign_with_aux_rand};
use crate::util::ser::{Writeable, Writer, Readable, ReadableArgs};
use crate::chain::transaction::OutPoint;
use crate::ln::channel::ANCHOR_OUTPUT_VALUE_SATOSHI;
#[cfg(taproot)]
use crate::sign::taproot::TaprootChannelSigner;
use crate::util::atomic_counter::AtomicCounter;
-use crate::util::chacha20::ChaCha20;
+use crate::crypto::chacha20::ChaCha20;
use crate::util::invoice::construct_invoice_preimage;
pub(crate) mod type_resolver;
channel_value_satoshis: u64,
/// Key derivation parameters.
channel_keys_id: [u8; 32],
- /// Seed from which all randomness produced is derived from.
- rand_bytes_unique_start: [u8; 32],
- /// Tracks the number of times we've produced randomness to ensure we don't return the same
- /// bytes twice.
- rand_bytes_index: AtomicCounter,
+ /// A source of random bytes.
+ entropy_source: RandomBytes,
}
impl PartialEq for InMemorySigner {
channel_parameters: self.channel_parameters.clone(),
channel_value_satoshis: self.channel_value_satoshis,
channel_keys_id: self.channel_keys_id,
- rand_bytes_unique_start: self.get_secure_random_bytes(),
- rand_bytes_index: AtomicCounter::new(),
+ entropy_source: RandomBytes::new(self.get_secure_random_bytes()),
}
}
}
holder_channel_pubkeys,
channel_parameters: None,
channel_keys_id,
- rand_bytes_unique_start,
- rand_bytes_index: AtomicCounter::new(),
+ entropy_source: RandomBytes::new(rand_bytes_unique_start),
}
}
impl EntropySource for InMemorySigner {
fn get_secure_random_bytes(&self) -> [u8; 32] {
- let index = self.rand_bytes_index.get_increment();
- let mut nonce = [0u8; 16];
- nonce[..8].copy_from_slice(&index.to_be_bytes());
- ChaCha20::get_single_block(&self.rand_bytes_unique_start, &nonce)
+ self.entropy_source.get_secure_random_bytes()
}
}
holder_channel_pubkeys,
channel_parameters: counterparty_channel_data,
channel_keys_id: keys_id,
- rand_bytes_unique_start: entropy_source.get_secure_random_bytes(),
- rand_bytes_index: AtomicCounter::new(),
+ entropy_source: RandomBytes::new(entropy_source.get_secure_random_bytes()),
})
}
}
channel_master_key: ExtendedPrivKey,
channel_child_index: AtomicUsize,
- rand_bytes_unique_start: [u8; 32],
- rand_bytes_index: AtomicCounter,
+ entropy_source: RandomBytes,
seed: [u8; 32],
starting_time_secs: u64,
channel_master_key,
channel_child_index: AtomicUsize::new(0),
- rand_bytes_unique_start,
- rand_bytes_index: AtomicCounter::new(),
+ entropy_source: RandomBytes::new(rand_bytes_unique_start),
seed: *seed,
starting_time_secs,
impl EntropySource for KeysManager {
fn get_secure_random_bytes(&self) -> [u8; 32] {
- let index = self.rand_bytes_index.get_increment();
- let mut nonce = [0u8; 16];
- nonce[..8].copy_from_slice(&index.to_be_bytes());
- ChaCha20::get_single_block(&self.rand_bytes_unique_start, &nonce)
+ self.entropy_source.get_secure_random_bytes()
}
}
}
}
+/// An implementation of [`EntropySource`] using ChaCha20.
+#[derive(Debug)]
+pub struct RandomBytes {
+ /// Seed from which all randomness produced is derived.
+ seed: [u8; 32],
+ /// Tracks the number of times we've produced randomness to ensure we don't return the same
+ /// bytes twice.
+ index: AtomicCounter,
+}
+
+impl RandomBytes {
+ /// Creates a new instance using the given seed.
+ pub fn new(seed: [u8; 32]) -> Self {
+ Self {
+ seed,
+ index: AtomicCounter::new(),
+ }
+ }
+}
+
+impl EntropySource for RandomBytes {
+ fn get_secure_random_bytes(&self) -> [u8; 32] {
+ let index = self.index.get_increment();
+ let mut nonce = [0u8; 16];
+ nonce[..8].copy_from_slice(&index.to_be_bytes());
+ ChaCha20::get_single_block(&self.seed, &nonce)
+ }
+}
+
// Ensure that EcdsaChannelSigner can have a vtable
#[test]
pub fn dyn_sign() {
+++ /dev/null
-// This file was stolen from rust-crypto.
-// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
-// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
-// You may not use this file except in accordance with one or both of these
-// licenses.
-
-use crate::io;
-
-#[cfg(not(fuzzing))]
-mod real_chacha {
- use core::cmp;
- use core::convert::TryInto;
-
- #[derive(Clone, Copy, PartialEq, Eq)]
- #[allow(non_camel_case_types)]
- struct u32x4(pub u32, pub u32, pub u32, pub u32);
- impl ::core::ops::Add for u32x4 {
- type Output = u32x4;
- #[inline]
- fn add(self, rhs: u32x4) -> u32x4 {
- u32x4(self.0.wrapping_add(rhs.0),
- self.1.wrapping_add(rhs.1),
- self.2.wrapping_add(rhs.2),
- self.3.wrapping_add(rhs.3))
- }
- }
- impl ::core::ops::Sub for u32x4 {
- type Output = u32x4;
- #[inline]
- fn sub(self, rhs: u32x4) -> u32x4 {
- u32x4(self.0.wrapping_sub(rhs.0),
- self.1.wrapping_sub(rhs.1),
- self.2.wrapping_sub(rhs.2),
- self.3.wrapping_sub(rhs.3))
- }
- }
- impl ::core::ops::BitXor for u32x4 {
- type Output = u32x4;
- #[inline]
- fn bitxor(self, rhs: u32x4) -> u32x4 {
- u32x4(self.0 ^ rhs.0, self.1 ^ rhs.1, self.2 ^ rhs.2, self.3 ^ rhs.3)
- }
- }
- impl ::core::ops::Shr<u8> for u32x4 {
- type Output = u32x4;
- #[inline]
- fn shr(self, shr: u8) -> u32x4 {
- u32x4(self.0 >> shr, self.1 >> shr, self.2 >> shr, self.3 >> shr)
- }
- }
- impl ::core::ops::Shl<u8> for u32x4 {
- type Output = u32x4;
- #[inline]
- fn shl(self, shl: u8) -> u32x4 {
- u32x4(self.0 << shl, self.1 << shl, self.2 << shl, self.3 << shl)
- }
- }
- impl u32x4 {
- #[inline]
- fn from_bytes(bytes: &[u8]) -> Self {
- assert_eq!(bytes.len(), 4*4);
- Self (
- u32::from_le_bytes(bytes[0*4..1*4].try_into().expect("len is 4")),
- u32::from_le_bytes(bytes[1*4..2*4].try_into().expect("len is 4")),
- u32::from_le_bytes(bytes[2*4..3*4].try_into().expect("len is 4")),
- u32::from_le_bytes(bytes[3*4..4*4].try_into().expect("len is 4")),
- )
- }
- }
-
- const BLOCK_SIZE: usize = 64;
-
- #[derive(Clone,Copy)]
- struct ChaChaState {
- a: u32x4,
- b: u32x4,
- c: u32x4,
- d: u32x4
- }
-
- #[derive(Copy)]
- pub struct ChaCha20 {
- state : ChaChaState,
- output : [u8; BLOCK_SIZE],
- offset : usize,
- }
-
- impl Clone for ChaCha20 { fn clone(&self) -> ChaCha20 { *self } }
-
- macro_rules! swizzle {
- ($b: expr, $c: expr, $d: expr) => {{
- let u32x4(b10, b11, b12, b13) = $b;
- $b = u32x4(b11, b12, b13, b10);
- let u32x4(c10, c11, c12, c13) = $c;
- $c = u32x4(c12, c13,c10, c11);
- let u32x4(d10, d11, d12, d13) = $d;
- $d = u32x4(d13, d10, d11, d12);
- }}
- }
-
- macro_rules! state_to_buffer {
- ($state: expr, $output: expr) => {{
- let u32x4(a1, a2, a3, a4) = $state.a;
- let u32x4(b1, b2, b3, b4) = $state.b;
- let u32x4(c1, c2, c3, c4) = $state.c;
- let u32x4(d1, d2, d3, d4) = $state.d;
- let lens = [
- a1,a2,a3,a4,
- b1,b2,b3,b4,
- c1,c2,c3,c4,
- d1,d2,d3,d4
- ];
- for i in 0..lens.len() {
- $output[i*4..(i+1)*4].copy_from_slice(&lens[i].to_le_bytes());
- }
- }}
- }
-
- macro_rules! round{
- ($state: expr) => {{
- $state.a = $state.a + $state.b;
- rotate!($state.d, $state.a, 16);
- $state.c = $state.c + $state.d;
- rotate!($state.b, $state.c, 12);
- $state.a = $state.a + $state.b;
- rotate!($state.d, $state.a, 8);
- $state.c = $state.c + $state.d;
- rotate!($state.b, $state.c, 7);
- }}
- }
-
- macro_rules! rotate {
- ($a: expr, $b: expr, $rot: expr) => {{
- let v = $a ^ $b;
- let r = 32 - $rot;
- let right = v >> r;
- $a = (v << $rot) ^ right
- }}
- }
-
- impl ChaCha20 {
- pub fn new(key: &[u8], nonce: &[u8]) -> ChaCha20 {
- assert!(key.len() == 16 || key.len() == 32);
- assert!(nonce.len() == 8 || nonce.len() == 12);
-
- ChaCha20{ state: ChaCha20::expand(key, nonce), output: [0u8; BLOCK_SIZE], offset: 64 }
- }
-
- /// Get one block from a ChaCha stream.
- pub fn get_single_block(key: &[u8; 32], nonce: &[u8; 16]) -> [u8; 32] {
- let mut chacha = ChaCha20 { state: ChaCha20::expand(key, nonce), output: [0u8; BLOCK_SIZE], offset: 64 };
- let mut chacha_bytes = [0; 32];
- chacha.process_in_place(&mut chacha_bytes);
- chacha_bytes
- }
-
- /// Encrypts `src` into `dest` using a single block from a ChaCha stream. Passing `dest` as
- /// `src` in a second call will decrypt it.
- pub fn encrypt_single_block(
- key: &[u8; 32], nonce: &[u8; 16], dest: &mut [u8], src: &[u8]
- ) {
- debug_assert_eq!(dest.len(), src.len());
- debug_assert!(dest.len() <= 32);
-
- let block = ChaCha20::get_single_block(key, nonce);
- for i in 0..dest.len() {
- dest[i] = block[i] ^ src[i];
- }
- }
-
- /// Same as `encrypt_single_block` only operates on a fixed-size input in-place.
- pub fn encrypt_single_block_in_place(
- key: &[u8; 32], nonce: &[u8; 16], bytes: &mut [u8; 32]
- ) {
- let block = ChaCha20::get_single_block(key, nonce);
- for i in 0..bytes.len() {
- bytes[i] = block[i] ^ bytes[i];
- }
- }
-
- fn expand(key: &[u8], nonce: &[u8]) -> ChaChaState {
- let constant = match key.len() {
- 16 => b"expand 16-byte k",
- 32 => b"expand 32-byte k",
- _ => unreachable!(),
- };
- ChaChaState {
- a: u32x4::from_bytes(&constant[0..16]),
- b: u32x4::from_bytes(&key[0..16]),
- c: if key.len() == 16 {
- u32x4::from_bytes(&key[0..16])
- } else {
- u32x4::from_bytes(&key[16..32])
- },
- d: if nonce.len() == 16 {
- u32x4::from_bytes(&nonce[0..16])
- } else if nonce.len() == 12 {
- let mut nonce4 = [0; 4*4];
- nonce4[4..].copy_from_slice(nonce);
- u32x4::from_bytes(&nonce4)
- } else {
- let mut nonce4 = [0; 4*4];
- nonce4[8..].copy_from_slice(nonce);
- u32x4::from_bytes(&nonce4)
- }
- }
- }
-
- // put the the next BLOCK_SIZE keystream bytes into self.output
- fn update(&mut self) {
- let mut state = self.state;
-
- for _ in 0..10 {
- round!(state);
- swizzle!(state.b, state.c, state.d);
- round!(state);
- swizzle!(state.d, state.c, state.b);
- }
- state.a = state.a + self.state.a;
- state.b = state.b + self.state.b;
- state.c = state.c + self.state.c;
- state.d = state.d + self.state.d;
-
- state_to_buffer!(state, self.output);
-
- self.state.d = self.state.d + u32x4(1, 0, 0, 0);
- let u32x4(c12, _, _, _) = self.state.d;
- if c12 == 0 {
- // we could increment the other counter word with an 8 byte nonce
- // but other implementations like boringssl have this same
- // limitation
- panic!("counter is exhausted");
- }
-
- self.offset = 0;
- }
-
- #[inline] // Useful cause input may be 0s on stack that should be optimized out
- pub fn process(&mut self, input: &[u8], output: &mut [u8]) {
- assert!(input.len() == output.len());
- let len = input.len();
- let mut i = 0;
- while i < len {
- // If there is no keystream available in the output buffer,
- // generate the next block.
- if self.offset == BLOCK_SIZE {
- self.update();
- }
-
- // Process the min(available keystream, remaining input length).
- let count = cmp::min(BLOCK_SIZE - self.offset, len - i);
- // explicitly assert lengths to avoid bounds checks:
- assert!(output.len() >= i + count);
- assert!(input.len() >= i + count);
- assert!(self.output.len() >= self.offset + count);
- for j in 0..count {
- output[i + j] = input[i + j] ^ self.output[self.offset + j];
- }
- i += count;
- self.offset += count;
- }
- }
-
- pub fn process_in_place(&mut self, input_output: &mut [u8]) {
- let len = input_output.len();
- let mut i = 0;
- while i < len {
- // If there is no keystream available in the output buffer,
- // generate the next block.
- if self.offset == BLOCK_SIZE {
- self.update();
- }
-
- // Process the min(available keystream, remaining input length).
- let count = cmp::min(BLOCK_SIZE - self.offset, len - i);
- // explicitly assert lengths to avoid bounds checks:
- assert!(input_output.len() >= i + count);
- assert!(self.output.len() >= self.offset + count);
- for j in 0..count {
- input_output[i + j] ^= self.output[self.offset + j];
- }
- i += count;
- self.offset += count;
- }
- }
-
- #[cfg(test)]
- pub fn seek_to_block(&mut self, block_offset: u32) {
- self.state.d.0 = block_offset;
- self.update();
- }
- }
-}
-#[cfg(not(fuzzing))]
-pub use self::real_chacha::ChaCha20;
-
-#[cfg(fuzzing)]
-mod fuzzy_chacha {
- pub struct ChaCha20 {}
-
- impl ChaCha20 {
- pub fn new(key: &[u8], nonce: &[u8]) -> ChaCha20 {
- assert!(key.len() == 16 || key.len() == 32);
- assert!(nonce.len() == 8 || nonce.len() == 12);
- Self {}
- }
-
- pub fn get_single_block(_key: &[u8; 32], _nonce: &[u8; 16]) -> [u8; 32] {
- [0; 32]
- }
-
- pub fn encrypt_single_block(
- _key: &[u8; 32], _nonce: &[u8; 16], dest: &mut [u8], src: &[u8]
- ) {
- debug_assert_eq!(dest.len(), src.len());
- debug_assert!(dest.len() <= 32);
- }
-
- pub fn encrypt_single_block_in_place(
- _key: &[u8; 32], _nonce: &[u8; 16], _bytes: &mut [u8; 32]
- ) {}
-
- pub fn process(&mut self, input: &[u8], output: &mut [u8]) {
- output.copy_from_slice(input);
- }
-
- pub fn process_in_place(&mut self, _input_output: &mut [u8]) {}
- }
-}
-#[cfg(fuzzing)]
-pub use self::fuzzy_chacha::ChaCha20;
-
-pub(crate) struct ChaChaReader<'a, R: io::Read> {
- pub chacha: &'a mut ChaCha20,
- pub read: R,
-}
-impl<'a, R: io::Read> io::Read for ChaChaReader<'a, R> {
- fn read(&mut self, dest: &mut [u8]) -> Result<usize, io::Error> {
- let res = self.read.read(dest)?;
- if res > 0 {
- self.chacha.process_in_place(&mut dest[0..res]);
- }
- Ok(res)
- }
-}
-
-#[cfg(test)]
-mod test {
- use crate::prelude::*;
- use core::iter::repeat;
-
- use super::ChaCha20;
- use std::convert::TryInto;
-
- #[test]
- fn test_chacha20_256_tls_vectors() {
- struct TestVector {
- key: [u8; 32],
- nonce: [u8; 8],
- keystream: Vec<u8>,
- }
- // taken from http://tools.ietf.org/html/draft-agl-tls-chacha20poly1305-04
- let test_vectors = vec!(
- TestVector{
- key: [
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- ],
- nonce: [ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ],
- keystream: vec!(
- 0x76, 0xb8, 0xe0, 0xad, 0xa0, 0xf1, 0x3d, 0x90,
- 0x40, 0x5d, 0x6a, 0xe5, 0x53, 0x86, 0xbd, 0x28,
- 0xbd, 0xd2, 0x19, 0xb8, 0xa0, 0x8d, 0xed, 0x1a,
- 0xa8, 0x36, 0xef, 0xcc, 0x8b, 0x77, 0x0d, 0xc7,
- 0xda, 0x41, 0x59, 0x7c, 0x51, 0x57, 0x48, 0x8d,
- 0x77, 0x24, 0xe0, 0x3f, 0xb8, 0xd8, 0x4a, 0x37,
- 0x6a, 0x43, 0xb8, 0xf4, 0x15, 0x18, 0xa1, 0x1c,
- 0xc3, 0x87, 0xb6, 0x69, 0xb2, 0xee, 0x65, 0x86,
- ),
- }, TestVector{
- key: [
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
- ],
- nonce: [ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ],
- keystream: vec!(
- 0x45, 0x40, 0xf0, 0x5a, 0x9f, 0x1f, 0xb2, 0x96,
- 0xd7, 0x73, 0x6e, 0x7b, 0x20, 0x8e, 0x3c, 0x96,
- 0xeb, 0x4f, 0xe1, 0x83, 0x46, 0x88, 0xd2, 0x60,
- 0x4f, 0x45, 0x09, 0x52, 0xed, 0x43, 0x2d, 0x41,
- 0xbb, 0xe2, 0xa0, 0xb6, 0xea, 0x75, 0x66, 0xd2,
- 0xa5, 0xd1, 0xe7, 0xe2, 0x0d, 0x42, 0xaf, 0x2c,
- 0x53, 0xd7, 0x92, 0xb1, 0xc4, 0x3f, 0xea, 0x81,
- 0x7e, 0x9a, 0xd2, 0x75, 0xae, 0x54, 0x69, 0x63,
- ),
- }, TestVector{
- key: [
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- ],
- nonce: [ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01 ],
- keystream: vec!(
- 0xde, 0x9c, 0xba, 0x7b, 0xf3, 0xd6, 0x9e, 0xf5,
- 0xe7, 0x86, 0xdc, 0x63, 0x97, 0x3f, 0x65, 0x3a,
- 0x0b, 0x49, 0xe0, 0x15, 0xad, 0xbf, 0xf7, 0x13,
- 0x4f, 0xcb, 0x7d, 0xf1, 0x37, 0x82, 0x10, 0x31,
- 0xe8, 0x5a, 0x05, 0x02, 0x78, 0xa7, 0x08, 0x45,
- 0x27, 0x21, 0x4f, 0x73, 0xef, 0xc7, 0xfa, 0x5b,
- 0x52, 0x77, 0x06, 0x2e, 0xb7, 0xa0, 0x43, 0x3e,
- 0x44, 0x5f, 0x41, 0xe3,
- ),
- }, TestVector{
- key: [
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- ],
- nonce: [ 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ],
- keystream: vec!(
- 0xef, 0x3f, 0xdf, 0xd6, 0xc6, 0x15, 0x78, 0xfb,
- 0xf5, 0xcf, 0x35, 0xbd, 0x3d, 0xd3, 0x3b, 0x80,
- 0x09, 0x63, 0x16, 0x34, 0xd2, 0x1e, 0x42, 0xac,
- 0x33, 0x96, 0x0b, 0xd1, 0x38, 0xe5, 0x0d, 0x32,
- 0x11, 0x1e, 0x4c, 0xaf, 0x23, 0x7e, 0xe5, 0x3c,
- 0xa8, 0xad, 0x64, 0x26, 0x19, 0x4a, 0x88, 0x54,
- 0x5d, 0xdc, 0x49, 0x7a, 0x0b, 0x46, 0x6e, 0x7d,
- 0x6b, 0xbd, 0xb0, 0x04, 0x1b, 0x2f, 0x58, 0x6b,
- ),
- }, TestVector{
- key: [
- 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
- 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
- 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
- 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
- ],
- nonce: [ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07 ],
- keystream: vec!(
- 0xf7, 0x98, 0xa1, 0x89, 0xf1, 0x95, 0xe6, 0x69,
- 0x82, 0x10, 0x5f, 0xfb, 0x64, 0x0b, 0xb7, 0x75,
- 0x7f, 0x57, 0x9d, 0xa3, 0x16, 0x02, 0xfc, 0x93,
- 0xec, 0x01, 0xac, 0x56, 0xf8, 0x5a, 0xc3, 0xc1,
- 0x34, 0xa4, 0x54, 0x7b, 0x73, 0x3b, 0x46, 0x41,
- 0x30, 0x42, 0xc9, 0x44, 0x00, 0x49, 0x17, 0x69,
- 0x05, 0xd3, 0xbe, 0x59, 0xea, 0x1c, 0x53, 0xf1,
- 0x59, 0x16, 0x15, 0x5c, 0x2b, 0xe8, 0x24, 0x1a,
- 0x38, 0x00, 0x8b, 0x9a, 0x26, 0xbc, 0x35, 0x94,
- 0x1e, 0x24, 0x44, 0x17, 0x7c, 0x8a, 0xde, 0x66,
- 0x89, 0xde, 0x95, 0x26, 0x49, 0x86, 0xd9, 0x58,
- 0x89, 0xfb, 0x60, 0xe8, 0x46, 0x29, 0xc9, 0xbd,
- 0x9a, 0x5a, 0xcb, 0x1c, 0xc1, 0x18, 0xbe, 0x56,
- 0x3e, 0xb9, 0xb3, 0xa4, 0xa4, 0x72, 0xf8, 0x2e,
- 0x09, 0xa7, 0xe7, 0x78, 0x49, 0x2b, 0x56, 0x2e,
- 0xf7, 0x13, 0x0e, 0x88, 0xdf, 0xe0, 0x31, 0xc7,
- 0x9d, 0xb9, 0xd4, 0xf7, 0xc7, 0xa8, 0x99, 0x15,
- 0x1b, 0x9a, 0x47, 0x50, 0x32, 0xb6, 0x3f, 0xc3,
- 0x85, 0x24, 0x5f, 0xe0, 0x54, 0xe3, 0xdd, 0x5a,
- 0x97, 0xa5, 0xf5, 0x76, 0xfe, 0x06, 0x40, 0x25,
- 0xd3, 0xce, 0x04, 0x2c, 0x56, 0x6a, 0xb2, 0xc5,
- 0x07, 0xb1, 0x38, 0xdb, 0x85, 0x3e, 0x3d, 0x69,
- 0x59, 0x66, 0x09, 0x96, 0x54, 0x6c, 0xc9, 0xc4,
- 0xa6, 0xea, 0xfd, 0xc7, 0x77, 0xc0, 0x40, 0xd7,
- 0x0e, 0xaf, 0x46, 0xf7, 0x6d, 0xad, 0x39, 0x79,
- 0xe5, 0xc5, 0x36, 0x0c, 0x33, 0x17, 0x16, 0x6a,
- 0x1c, 0x89, 0x4c, 0x94, 0xa3, 0x71, 0x87, 0x6a,
- 0x94, 0xdf, 0x76, 0x28, 0xfe, 0x4e, 0xaa, 0xf2,
- 0xcc, 0xb2, 0x7d, 0x5a, 0xaa, 0xe0, 0xad, 0x7a,
- 0xd0, 0xf9, 0xd4, 0xb6, 0xad, 0x3b, 0x54, 0x09,
- 0x87, 0x46, 0xd4, 0x52, 0x4d, 0x38, 0x40, 0x7a,
- 0x6d, 0xeb, 0x3a, 0xb7, 0x8f, 0xab, 0x78, 0xc9,
- ),
- },
- );
-
- for tv in test_vectors.iter() {
- let mut c = ChaCha20::new(&tv.key, &tv.nonce);
- let input: Vec<u8> = repeat(0).take(tv.keystream.len()).collect();
- let mut output: Vec<u8> = repeat(0).take(input.len()).collect();
- c.process(&input[..], &mut output[..]);
- assert_eq!(output, tv.keystream);
- }
- }
-
- #[test]
- fn test_chacha20_256_tls_vectors_96_nonce() {
- struct TestVector {
- key: [u8; 32],
- nonce: [u8; 12],
- keystream: Vec<u8>,
- }
- // taken from http://tools.ietf.org/html/draft-agl-tls-chacha20poly1305-04
- let test_vectors = vec!(
- TestVector{
- key: [
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- ],
- nonce: [ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ],
- keystream: vec!(
- 0x76, 0xb8, 0xe0, 0xad, 0xa0, 0xf1, 0x3d, 0x90,
- 0x40, 0x5d, 0x6a, 0xe5, 0x53, 0x86, 0xbd, 0x28,
- 0xbd, 0xd2, 0x19, 0xb8, 0xa0, 0x8d, 0xed, 0x1a,
- 0xa8, 0x36, 0xef, 0xcc, 0x8b, 0x77, 0x0d, 0xc7,
- 0xda, 0x41, 0x59, 0x7c, 0x51, 0x57, 0x48, 0x8d,
- 0x77, 0x24, 0xe0, 0x3f, 0xb8, 0xd8, 0x4a, 0x37,
- 0x6a, 0x43, 0xb8, 0xf4, 0x15, 0x18, 0xa1, 0x1c,
- 0xc3, 0x87, 0xb6, 0x69, 0xb2, 0xee, 0x65, 0x86,
- ),
- }, TestVector{
- key: [
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
- ],
- nonce: [ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ],
- keystream: vec!(
- 0x45, 0x40, 0xf0, 0x5a, 0x9f, 0x1f, 0xb2, 0x96,
- 0xd7, 0x73, 0x6e, 0x7b, 0x20, 0x8e, 0x3c, 0x96,
- 0xeb, 0x4f, 0xe1, 0x83, 0x46, 0x88, 0xd2, 0x60,
- 0x4f, 0x45, 0x09, 0x52, 0xed, 0x43, 0x2d, 0x41,
- 0xbb, 0xe2, 0xa0, 0xb6, 0xea, 0x75, 0x66, 0xd2,
- 0xa5, 0xd1, 0xe7, 0xe2, 0x0d, 0x42, 0xaf, 0x2c,
- 0x53, 0xd7, 0x92, 0xb1, 0xc4, 0x3f, 0xea, 0x81,
- 0x7e, 0x9a, 0xd2, 0x75, 0xae, 0x54, 0x69, 0x63,
- ),
- }, TestVector{
- key: [
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- ],
- nonce: [ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01 ],
- keystream: vec!(
- 0xde, 0x9c, 0xba, 0x7b, 0xf3, 0xd6, 0x9e, 0xf5,
- 0xe7, 0x86, 0xdc, 0x63, 0x97, 0x3f, 0x65, 0x3a,
- 0x0b, 0x49, 0xe0, 0x15, 0xad, 0xbf, 0xf7, 0x13,
- 0x4f, 0xcb, 0x7d, 0xf1, 0x37, 0x82, 0x10, 0x31,
- 0xe8, 0x5a, 0x05, 0x02, 0x78, 0xa7, 0x08, 0x45,
- 0x27, 0x21, 0x4f, 0x73, 0xef, 0xc7, 0xfa, 0x5b,
- 0x52, 0x77, 0x06, 0x2e, 0xb7, 0xa0, 0x43, 0x3e,
- 0x44, 0x5f, 0x41, 0xe3,
- ),
- }, TestVector{
- key: [
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- ],
- nonce: [ 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ],
- keystream: vec!(
- 0xef, 0x3f, 0xdf, 0xd6, 0xc6, 0x15, 0x78, 0xfb,
- 0xf5, 0xcf, 0x35, 0xbd, 0x3d, 0xd3, 0x3b, 0x80,
- 0x09, 0x63, 0x16, 0x34, 0xd2, 0x1e, 0x42, 0xac,
- 0x33, 0x96, 0x0b, 0xd1, 0x38, 0xe5, 0x0d, 0x32,
- 0x11, 0x1e, 0x4c, 0xaf, 0x23, 0x7e, 0xe5, 0x3c,
- 0xa8, 0xad, 0x64, 0x26, 0x19, 0x4a, 0x88, 0x54,
- 0x5d, 0xdc, 0x49, 0x7a, 0x0b, 0x46, 0x6e, 0x7d,
- 0x6b, 0xbd, 0xb0, 0x04, 0x1b, 0x2f, 0x58, 0x6b,
- ),
- }, TestVector{
- key: [
- 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
- 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
- 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
- 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
- ],
- nonce: [0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07 ],
- keystream: vec!(
- 0xf7, 0x98, 0xa1, 0x89, 0xf1, 0x95, 0xe6, 0x69,
- 0x82, 0x10, 0x5f, 0xfb, 0x64, 0x0b, 0xb7, 0x75,
- 0x7f, 0x57, 0x9d, 0xa3, 0x16, 0x02, 0xfc, 0x93,
- 0xec, 0x01, 0xac, 0x56, 0xf8, 0x5a, 0xc3, 0xc1,
- 0x34, 0xa4, 0x54, 0x7b, 0x73, 0x3b, 0x46, 0x41,
- 0x30, 0x42, 0xc9, 0x44, 0x00, 0x49, 0x17, 0x69,
- 0x05, 0xd3, 0xbe, 0x59, 0xea, 0x1c, 0x53, 0xf1,
- 0x59, 0x16, 0x15, 0x5c, 0x2b, 0xe8, 0x24, 0x1a,
- 0x38, 0x00, 0x8b, 0x9a, 0x26, 0xbc, 0x35, 0x94,
- 0x1e, 0x24, 0x44, 0x17, 0x7c, 0x8a, 0xde, 0x66,
- 0x89, 0xde, 0x95, 0x26, 0x49, 0x86, 0xd9, 0x58,
- 0x89, 0xfb, 0x60, 0xe8, 0x46, 0x29, 0xc9, 0xbd,
- 0x9a, 0x5a, 0xcb, 0x1c, 0xc1, 0x18, 0xbe, 0x56,
- 0x3e, 0xb9, 0xb3, 0xa4, 0xa4, 0x72, 0xf8, 0x2e,
- 0x09, 0xa7, 0xe7, 0x78, 0x49, 0x2b, 0x56, 0x2e,
- 0xf7, 0x13, 0x0e, 0x88, 0xdf, 0xe0, 0x31, 0xc7,
- 0x9d, 0xb9, 0xd4, 0xf7, 0xc7, 0xa8, 0x99, 0x15,
- 0x1b, 0x9a, 0x47, 0x50, 0x32, 0xb6, 0x3f, 0xc3,
- 0x85, 0x24, 0x5f, 0xe0, 0x54, 0xe3, 0xdd, 0x5a,
- 0x97, 0xa5, 0xf5, 0x76, 0xfe, 0x06, 0x40, 0x25,
- 0xd3, 0xce, 0x04, 0x2c, 0x56, 0x6a, 0xb2, 0xc5,
- 0x07, 0xb1, 0x38, 0xdb, 0x85, 0x3e, 0x3d, 0x69,
- 0x59, 0x66, 0x09, 0x96, 0x54, 0x6c, 0xc9, 0xc4,
- 0xa6, 0xea, 0xfd, 0xc7, 0x77, 0xc0, 0x40, 0xd7,
- 0x0e, 0xaf, 0x46, 0xf7, 0x6d, 0xad, 0x39, 0x79,
- 0xe5, 0xc5, 0x36, 0x0c, 0x33, 0x17, 0x16, 0x6a,
- 0x1c, 0x89, 0x4c, 0x94, 0xa3, 0x71, 0x87, 0x6a,
- 0x94, 0xdf, 0x76, 0x28, 0xfe, 0x4e, 0xaa, 0xf2,
- 0xcc, 0xb2, 0x7d, 0x5a, 0xaa, 0xe0, 0xad, 0x7a,
- 0xd0, 0xf9, 0xd4, 0xb6, 0xad, 0x3b, 0x54, 0x09,
- 0x87, 0x46, 0xd4, 0x52, 0x4d, 0x38, 0x40, 0x7a,
- 0x6d, 0xeb, 0x3a, 0xb7, 0x8f, 0xab, 0x78, 0xc9,
- ),
- },
- );
-
- for tv in test_vectors.iter() {
- let mut c = ChaCha20::new(&tv.key, &tv.nonce);
- let input: Vec<u8> = repeat(0).take(tv.keystream.len()).collect();
- let mut output: Vec<u8> = repeat(0).take(input.len()).collect();
- c.process(&input[..], &mut output[..]);
- assert_eq!(output, tv.keystream);
- }
- }
-
- #[test]
- fn get_single_block() {
- // Test that `get_single_block` (which takes a 16-byte nonce) is equivalent to getting a block
- // using a 12-byte nonce, with the block starting at the counter offset given by the remaining 4
- // bytes.
- let key = [
- 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
- 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
- 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
- 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
- ];
- let nonce_16bytes = [
- 0x00, 0x01, 0x02, 0x03, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b
- ];
- let counter_pos = &nonce_16bytes[..4];
- let nonce_12bytes = &nonce_16bytes[4..];
-
- // Initialize a ChaCha20 instance with its counter starting at 0.
- let mut chacha20 = ChaCha20::new(&key, nonce_12bytes);
- // Seek its counter to the block at counter_pos.
- chacha20.seek_to_block(u32::from_le_bytes(counter_pos.try_into().unwrap()));
- let mut block_bytes = [0; 32];
- chacha20.process_in_place(&mut block_bytes);
-
- assert_eq!(ChaCha20::get_single_block(&key, &nonce_16bytes), block_bytes);
- }
-
- #[test]
- fn encrypt_single_block() {
- let key = [
- 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
- 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
- 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
- 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
- ];
- let nonce = [
- 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
- 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
- ];
- let bytes = [1; 32];
-
- let mut encrypted_bytes = [0; 32];
- ChaCha20::encrypt_single_block(&key, &nonce, &mut encrypted_bytes, &bytes);
-
- let mut decrypted_bytes = [0; 32];
- ChaCha20::encrypt_single_block(&key, &nonce, &mut decrypted_bytes, &encrypted_bytes);
-
- assert_eq!(bytes, decrypted_bytes);
- }
-
- #[test]
- fn encrypt_single_block_in_place() {
- let key = [
- 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
- 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
- 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
- 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
- ];
- let nonce = [
- 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
- 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
- ];
- let unencrypted_bytes = [1; 32];
- let mut bytes = unencrypted_bytes;
-
- ChaCha20::encrypt_single_block_in_place(&key, &nonce, &mut bytes);
- assert_ne!(bytes, unencrypted_bytes);
-
- ChaCha20::encrypt_single_block_in_place(&key, &nonce, &mut bytes);
- assert_eq!(bytes, unencrypted_bytes);
- }
-}
+++ /dev/null
-// ring has a garbage API so its use is avoided, but rust-crypto doesn't have RFC-variant poly1305
-// Instead, we steal rust-crypto's implementation and tweak it to match the RFC.
-//
-// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
-// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
-// You may not use this file except in accordance with one or both of these
-// licenses.
-//
-// This is a port of Andrew Moons poly1305-donna
-// https://github.com/floodyberry/poly1305-donna
-
-use crate::ln::msgs::DecodeError;
-use crate::util::ser::{FixedLengthReader, LengthRead, LengthReadableArgs, Readable, Writeable, Writer};
-use crate::io::{self, Read, Write};
-
-#[cfg(not(fuzzing))]
-mod real_chachapoly {
- use crate::util::chacha20::ChaCha20;
- use crate::util::poly1305::Poly1305;
- use bitcoin::hashes::cmp::fixed_time_eq;
-
- #[derive(Clone, Copy)]
- pub struct ChaCha20Poly1305RFC {
- cipher: ChaCha20,
- mac: Poly1305,
- finished: bool,
- data_len: usize,
- aad_len: u64,
- }
-
- impl ChaCha20Poly1305RFC {
- #[inline]
- fn pad_mac_16(mac: &mut Poly1305, len: usize) {
- if len % 16 != 0 {
- mac.input(&[0; 16][0..16 - (len % 16)]);
- }
- }
- pub fn new(key: &[u8], nonce: &[u8], aad: &[u8]) -> ChaCha20Poly1305RFC {
- assert!(key.len() == 16 || key.len() == 32);
- assert!(nonce.len() == 12);
-
- // Ehh, I'm too lazy to *also* tweak ChaCha20 to make it RFC-compliant
- assert!(nonce[0] == 0 && nonce[1] == 0 && nonce[2] == 0 && nonce[3] == 0);
-
- let mut cipher = ChaCha20::new(key, &nonce[4..]);
- let mut mac_key = [0u8; 64];
- let zero_key = [0u8; 64];
- cipher.process(&zero_key, &mut mac_key);
-
- let mut mac = Poly1305::new(&mac_key[..32]);
- mac.input(aad);
- ChaCha20Poly1305RFC::pad_mac_16(&mut mac, aad.len());
-
- ChaCha20Poly1305RFC {
- cipher,
- mac,
- finished: false,
- data_len: 0,
- aad_len: aad.len() as u64,
- }
- }
-
- pub fn encrypt(&mut self, input: &[u8], output: &mut [u8], out_tag: &mut [u8]) {
- assert!(input.len() == output.len());
- assert!(self.finished == false);
- self.cipher.process(input, output);
- self.data_len += input.len();
- self.mac.input(output);
- ChaCha20Poly1305RFC::pad_mac_16(&mut self.mac, self.data_len);
- self.finished = true;
- self.mac.input(&self.aad_len.to_le_bytes());
- self.mac.input(&(self.data_len as u64).to_le_bytes());
- self.mac.raw_result(out_tag);
- }
-
- pub fn encrypt_full_message_in_place(&mut self, input_output: &mut [u8], out_tag: &mut [u8]) {
- self.encrypt_in_place(input_output);
- self.finish_and_get_tag(out_tag);
- }
-
- // Encrypt `input_output` in-place. To finish and calculate the tag, use `finish_and_get_tag`
- // below.
- pub(super) fn encrypt_in_place(&mut self, input_output: &mut [u8]) {
- debug_assert!(self.finished == false);
- self.cipher.process_in_place(input_output);
- self.data_len += input_output.len();
- self.mac.input(input_output);
- }
-
- // If we were previously encrypting with `encrypt_in_place`, this method can be used to finish
- // encrypting and calculate the tag.
- pub(super) fn finish_and_get_tag(&mut self, out_tag: &mut [u8]) {
- debug_assert!(self.finished == false);
- ChaCha20Poly1305RFC::pad_mac_16(&mut self.mac, self.data_len);
- self.finished = true;
- self.mac.input(&self.aad_len.to_le_bytes());
- self.mac.input(&(self.data_len as u64).to_le_bytes());
- self.mac.raw_result(out_tag);
- }
-
- pub fn decrypt(&mut self, input: &[u8], output: &mut [u8], tag: &[u8]) -> bool {
- assert!(input.len() == output.len());
- assert!(self.finished == false);
-
- self.finished = true;
-
- self.mac.input(input);
-
- self.data_len += input.len();
- ChaCha20Poly1305RFC::pad_mac_16(&mut self.mac, self.data_len);
- self.mac.input(&self.aad_len.to_le_bytes());
- self.mac.input(&(self.data_len as u64).to_le_bytes());
-
- let mut calc_tag = [0u8; 16];
- self.mac.raw_result(&mut calc_tag);
- if fixed_time_eq(&calc_tag, tag) {
- self.cipher.process(input, output);
- true
- } else {
- false
- }
- }
-
- pub fn check_decrypt_in_place(&mut self, input_output: &mut [u8], tag: &[u8]) -> Result<(), ()> {
- self.decrypt_in_place(input_output);
- if self.finish_and_check_tag(tag) { Ok(()) } else { Err(()) }
- }
-
- /// Decrypt in place, without checking the tag. Use `finish_and_check_tag` to check it
- /// later when decryption finishes.
- ///
- /// Should never be `pub` because the public API should always enforce tag checking.
- pub(super) fn decrypt_in_place(&mut self, input_output: &mut [u8]) {
- debug_assert!(self.finished == false);
- self.mac.input(input_output);
- self.data_len += input_output.len();
- self.cipher.process_in_place(input_output);
- }
-
- /// If we were previously decrypting with `just_decrypt_in_place`, this method must be used
- /// to check the tag. Returns whether or not the tag is valid.
- pub(super) fn finish_and_check_tag(&mut self, tag: &[u8]) -> bool {
- debug_assert!(self.finished == false);
- self.finished = true;
- ChaCha20Poly1305RFC::pad_mac_16(&mut self.mac, self.data_len);
- self.mac.input(&self.aad_len.to_le_bytes());
- self.mac.input(&(self.data_len as u64).to_le_bytes());
-
- let mut calc_tag = [0u8; 16];
- self.mac.raw_result(&mut calc_tag);
- if fixed_time_eq(&calc_tag, tag) {
- true
- } else {
- false
- }
- }
- }
-}
-#[cfg(not(fuzzing))]
-pub use self::real_chachapoly::ChaCha20Poly1305RFC;
-
-/// Enables simultaneously reading and decrypting a ChaCha20Poly1305RFC stream from a std::io::Read.
-struct ChaChaPolyReader<'a, R: Read> {
- pub chacha: &'a mut ChaCha20Poly1305RFC,
- pub read: R,
-}
-
-impl<'a, R: Read> Read for ChaChaPolyReader<'a, R> {
- // Decrypt bytes from Self::read into `dest`.
- // `ChaCha20Poly1305RFC::finish_and_check_tag` must be called to check the tag after all reads
- // complete.
- fn read(&mut self, dest: &mut [u8]) -> Result<usize, io::Error> {
- let res = self.read.read(dest)?;
- if res > 0 {
- self.chacha.decrypt_in_place(&mut dest[0..res]);
- }
- Ok(res)
- }
-}
-
-/// Enables simultaneously writing and encrypting a byte stream into a Writer.
-struct ChaChaPolyWriter<'a, W: Writer> {
- pub chacha: &'a mut ChaCha20Poly1305RFC,
- pub write: &'a mut W,
-}
-
-impl<'a, W: Writer> Writer for ChaChaPolyWriter<'a, W> {
- // Encrypt then write bytes from `src` into Self::write.
- // `ChaCha20Poly1305RFC::finish_and_get_tag` can be called to retrieve the tag after all writes
- // complete.
- fn write_all(&mut self, src: &[u8]) -> Result<(), io::Error> {
- let mut src_idx = 0;
- while src_idx < src.len() {
- let mut write_buffer = [0; 8192];
- let bytes_written = (&mut write_buffer[..]).write(&src[src_idx..]).expect("In-memory writes can't fail");
- self.chacha.encrypt_in_place(&mut write_buffer[..bytes_written]);
- self.write.write_all(&write_buffer[..bytes_written])?;
- src_idx += bytes_written;
- }
- Ok(())
- }
-}
-
-/// Enables the use of the serialization macros for objects that need to be simultaneously encrypted and
-/// serialized. This allows us to avoid an intermediate Vec allocation.
-pub(crate) struct ChaChaPolyWriteAdapter<'a, W: Writeable> {
- pub rho: [u8; 32],
- pub writeable: &'a W,
-}
-
-impl<'a, W: Writeable> ChaChaPolyWriteAdapter<'a, W> {
- #[allow(unused)] // This will be used for onion messages soon
- pub fn new(rho: [u8; 32], writeable: &'a W) -> ChaChaPolyWriteAdapter<'a, W> {
- Self { rho, writeable }
- }
-}
-
-impl<'a, T: Writeable> Writeable for ChaChaPolyWriteAdapter<'a, T> {
- // Simultaneously write and encrypt Self::writeable.
- fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
- let mut chacha = ChaCha20Poly1305RFC::new(&self.rho, &[0; 12], &[]);
- let mut chacha_stream = ChaChaPolyWriter { chacha: &mut chacha, write: w };
- self.writeable.write(&mut chacha_stream)?;
- let mut tag = [0 as u8; 16];
- chacha.finish_and_get_tag(&mut tag);
- tag.write(w)?;
-
- Ok(())
- }
-}
-
-/// Enables the use of the serialization macros for objects that need to be simultaneously decrypted and
-/// deserialized. This allows us to avoid an intermediate Vec allocation.
-pub(crate) struct ChaChaPolyReadAdapter<R: Readable> {
- pub readable: R,
-}
-
-impl<T: Readable> LengthReadableArgs<[u8; 32]> for ChaChaPolyReadAdapter<T> {
- // Simultaneously read and decrypt an object from a LengthRead, storing it in Self::readable.
- // LengthRead must be used instead of std::io::Read because we need the total length to separate
- // out the tag at the end.
- fn read<R: LengthRead>(mut r: &mut R, secret: [u8; 32]) -> Result<Self, DecodeError> {
- if r.total_bytes() < 16 { return Err(DecodeError::InvalidValue) }
-
- let mut chacha = ChaCha20Poly1305RFC::new(&secret, &[0; 12], &[]);
- let decrypted_len = r.total_bytes() - 16;
- let s = FixedLengthReader::new(&mut r, decrypted_len);
- let mut chacha_stream = ChaChaPolyReader { chacha: &mut chacha, read: s };
- let readable: T = Readable::read(&mut chacha_stream)?;
- chacha_stream.read.eat_remaining()?;
-
- let mut tag = [0 as u8; 16];
- r.read_exact(&mut tag)?;
- if !chacha.finish_and_check_tag(&tag) {
- return Err(DecodeError::InvalidValue)
- }
-
- Ok(Self { readable })
- }
-}
-
-#[cfg(fuzzing)]
-mod fuzzy_chachapoly {
- #[derive(Clone, Copy)]
- pub struct ChaCha20Poly1305RFC {
- tag: [u8; 16],
- finished: bool,
- }
- impl ChaCha20Poly1305RFC {
- pub fn new(key: &[u8], nonce: &[u8], _aad: &[u8]) -> ChaCha20Poly1305RFC {
- assert!(key.len() == 16 || key.len() == 32);
- assert!(nonce.len() == 12);
-
- // Ehh, I'm too lazy to *also* tweak ChaCha20 to make it RFC-compliant
- assert!(nonce[0] == 0 && nonce[1] == 0 && nonce[2] == 0 && nonce[3] == 0);
-
- let mut tag = [0; 16];
- tag.copy_from_slice(&key[0..16]);
-
- ChaCha20Poly1305RFC {
- tag,
- finished: false,
- }
- }
-
- pub fn encrypt(&mut self, input: &[u8], output: &mut [u8], out_tag: &mut [u8]) {
- assert!(input.len() == output.len());
- assert!(self.finished == false);
-
- output.copy_from_slice(&input);
- out_tag.copy_from_slice(&self.tag);
- self.finished = true;
- }
-
- pub fn encrypt_full_message_in_place(&mut self, input_output: &mut [u8], out_tag: &mut [u8]) {
- self.encrypt_in_place(input_output);
- self.finish_and_get_tag(out_tag);
- }
-
- pub(super) fn encrypt_in_place(&mut self, _input_output: &mut [u8]) {
- assert!(self.finished == false);
- }
-
- pub(super) fn finish_and_get_tag(&mut self, out_tag: &mut [u8]) {
- assert!(self.finished == false);
- out_tag.copy_from_slice(&self.tag);
- self.finished = true;
- }
-
- pub fn decrypt(&mut self, input: &[u8], output: &mut [u8], tag: &[u8]) -> bool {
- assert!(input.len() == output.len());
- assert!(self.finished == false);
-
- if tag[..] != self.tag[..] { return false; }
- output.copy_from_slice(input);
- self.finished = true;
- true
- }
-
- pub fn check_decrypt_in_place(&mut self, input_output: &mut [u8], tag: &[u8]) -> Result<(), ()> {
- self.decrypt_in_place(input_output);
- if self.finish_and_check_tag(tag) { Ok(()) } else { Err(()) }
- }
-
- pub(super) fn decrypt_in_place(&mut self, _input: &mut [u8]) {
- assert!(self.finished == false);
- }
-
- pub(super) fn finish_and_check_tag(&mut self, tag: &[u8]) -> bool {
- if tag[..] != self.tag[..] { return false; }
- self.finished = true;
- true
- }
- }
-}
-#[cfg(fuzzing)]
-pub use self::fuzzy_chachapoly::ChaCha20Poly1305RFC;
-
-#[cfg(test)]
-mod tests {
- use crate::ln::msgs::DecodeError;
- use super::{ChaChaPolyReadAdapter, ChaChaPolyWriteAdapter};
- use crate::util::ser::{self, FixedLengthReader, LengthReadableArgs, Writeable};
-
- // Used for for testing various lengths of serialization.
- #[derive(Debug, PartialEq, Eq)]
- struct TestWriteable {
- field1: Vec<u8>,
- field2: Vec<u8>,
- field3: Vec<u8>,
- }
- impl_writeable_tlv_based!(TestWriteable, {
- (1, field1, required_vec),
- (2, field2, required_vec),
- (3, field3, required_vec),
- });
-
- #[test]
- fn test_chacha_stream_adapters() {
- // Check that ChaChaPolyReadAdapter and ChaChaPolyWriteAdapter correctly encode and decode an
- // encrypted object.
- macro_rules! check_object_read_write {
- ($obj: expr) => {
- // First, serialize the object, encrypted with ChaCha20Poly1305.
- let rho = [42; 32];
- let writeable_len = $obj.serialized_length() as u64 + 16;
- let write_adapter = ChaChaPolyWriteAdapter::new(rho, &$obj);
- let encrypted_writeable_bytes = write_adapter.encode();
- let encrypted_writeable = &encrypted_writeable_bytes[..];
-
- // Now deserialize the object back and make sure it matches the original.
- let mut rd = FixedLengthReader::new(encrypted_writeable, writeable_len);
- let read_adapter = <ChaChaPolyReadAdapter<TestWriteable>>::read(&mut rd, rho).unwrap();
- assert_eq!($obj, read_adapter.readable);
- };
- }
-
- // Try a big object that will require multiple write buffers.
- let big_writeable = TestWriteable {
- field1: vec![43],
- field2: vec![44; 4192],
- field3: vec![45; 4192 + 1],
- };
- check_object_read_write!(big_writeable);
-
- // Try a small object that fits into one write buffer.
- let small_writeable = TestWriteable {
- field1: vec![43],
- field2: vec![44],
- field3: vec![45],
- };
- check_object_read_write!(small_writeable);
- }
-
- fn do_chacha_stream_adapters_ser_macros() -> Result<(), DecodeError> {
- let writeable = TestWriteable {
- field1: vec![43],
- field2: vec![44; 4192],
- field3: vec![45; 4192 + 1],
- };
-
- // First, serialize the object into a TLV stream, encrypted with ChaCha20Poly1305.
- let rho = [42; 32];
- let write_adapter = ChaChaPolyWriteAdapter::new(rho, &writeable);
- let mut writer = ser::VecWriter(Vec::new());
- encode_tlv_stream!(&mut writer, {
- (1, write_adapter, required),
- });
-
- // Now deserialize the object back and make sure it matches the original.
- let mut read_adapter: Option<ChaChaPolyReadAdapter<TestWriteable>> = None;
- decode_tlv_stream!(&writer.0[..], {
- (1, read_adapter, (option: LengthReadableArgs, rho)),
- });
- assert_eq!(writeable, read_adapter.unwrap().readable);
-
- Ok(())
- }
-
- #[test]
- fn chacha_stream_adapters_ser_macros() {
- // Test that our stream adapters work as expected with the TLV macros.
- // This also serves to test the `option: $trait` variant of the `_decode_tlv` ser macro.
- do_chacha_stream_adapters_ser_macros().unwrap()
- }
-}
+++ /dev/null
-use bitcoin::hashes::{Hash, HashEngine};
-use bitcoin::hashes::hmac::{Hmac, HmacEngine};
-use bitcoin::hashes::sha256::Hash as Sha256;
-use bitcoin::secp256k1::{Message, Secp256k1, SecretKey, ecdsa::Signature, Signing};
-
-use crate::sign::EntropySource;
-
-use core::ops::Deref;
-
-macro_rules! hkdf_extract_expand {
- ($salt: expr, $ikm: expr) => {{
- let mut hmac = HmacEngine::<Sha256>::new($salt);
- hmac.input($ikm);
- let prk = Hmac::from_engine(hmac).to_byte_array();
- let mut hmac = HmacEngine::<Sha256>::new(&prk[..]);
- hmac.input(&[1; 1]);
- let t1 = Hmac::from_engine(hmac).to_byte_array();
- let mut hmac = HmacEngine::<Sha256>::new(&prk[..]);
- hmac.input(&t1);
- hmac.input(&[2; 1]);
- (t1, Hmac::from_engine(hmac).to_byte_array(), prk)
- }};
- ($salt: expr, $ikm: expr, 2) => {{
- let (k1, k2, _) = hkdf_extract_expand!($salt, $ikm);
- (k1, k2)
- }};
- ($salt: expr, $ikm: expr, 5) => {{
- let (k1, k2, prk) = hkdf_extract_expand!($salt, $ikm);
-
- let mut hmac = HmacEngine::<Sha256>::new(&prk[..]);
- hmac.input(&k2);
- hmac.input(&[3; 1]);
- let k3 = Hmac::from_engine(hmac).to_byte_array();
-
- let mut hmac = HmacEngine::<Sha256>::new(&prk[..]);
- hmac.input(&k3);
- hmac.input(&[4; 1]);
- let k4 = Hmac::from_engine(hmac).to_byte_array();
-
- let mut hmac = HmacEngine::<Sha256>::new(&prk[..]);
- hmac.input(&k4);
- hmac.input(&[5; 1]);
- let k5 = Hmac::from_engine(hmac).to_byte_array();
-
- (k1, k2, k3, k4, k5)
- }}
-}
-
-pub fn hkdf_extract_expand_twice(salt: &[u8], ikm: &[u8]) -> ([u8; 32], [u8; 32]) {
- hkdf_extract_expand!(salt, ikm, 2)
-}
-
-pub fn hkdf_extract_expand_5x(salt: &[u8], ikm: &[u8]) -> ([u8; 32], [u8; 32], [u8; 32], [u8; 32], [u8; 32]) {
- hkdf_extract_expand!(salt, ikm, 5)
-}
-
-#[inline]
-pub fn sign<C: Signing>(ctx: &Secp256k1<C>, msg: &Message, sk: &SecretKey) -> Signature {
- #[cfg(feature = "grind_signatures")]
- let sig = ctx.sign_ecdsa_low_r(msg, sk);
- #[cfg(not(feature = "grind_signatures"))]
- let sig = ctx.sign_ecdsa(msg, sk);
- sig
-}
-
-#[inline]
-#[allow(unused_variables)]
-pub fn sign_with_aux_rand<C: Signing, ES: Deref>(
- ctx: &Secp256k1<C>, msg: &Message, sk: &SecretKey, entropy_source: &ES
-) -> Signature where ES::Target: EntropySource {
- #[cfg(feature = "grind_signatures")]
- let sig = loop {
- let sig = ctx.sign_ecdsa_with_noncedata(msg, sk, &entropy_source.get_secure_random_bytes());
- if sig.serialize_compact()[0] < 0x80 {
- break sig;
- }
- };
- #[cfg(all(not(feature = "grind_signatures"), not(feature = "_test_vectors")))]
- let sig = ctx.sign_ecdsa_with_noncedata(msg, sk, &entropy_source.get_secure_random_bytes());
- #[cfg(all(not(feature = "grind_signatures"), feature = "_test_vectors"))]
- let sig = sign(ctx, msg, sk);
- sig
-}
// licenses.
use crate::chain::transaction::OutPoint;
+use crate::ln::ChannelId;
use crate::sign::SpendableOutputDescriptor;
use bitcoin::hash_types::Txid;
pub(crate) struct DebugFundingChannelId<'a>(pub &'a Txid, pub u16);
impl<'a> core::fmt::Display for DebugFundingChannelId<'a> {
fn fmt(&self, f: &mut core::fmt::Formatter) -> Result<(), core::fmt::Error> {
- (OutPoint { txid: self.0.clone(), index: self.1 }).to_channel_id().fmt(f)
- }
-}
-macro_rules! log_funding_channel_id {
- ($funding_txid: expr, $funding_txo: expr) => {
- $crate::util::macro_logger::DebugFundingChannelId(&$funding_txid, $funding_txo)
+ ChannelId::v1_from_funding_outpoint(OutPoint { txid: self.0.clone(), index: self.1 }).fmt(f)
}
}
-pub(crate) struct DebugFundingInfo<'a, T: 'a>(pub &'a (OutPoint, T));
-impl<'a, T> core::fmt::Display for DebugFundingInfo<'a, T> {
+pub(crate) struct DebugFundingInfo<'a>(pub &'a ChannelId);
+impl<'a> core::fmt::Display for DebugFundingInfo<'a> {
fn fmt(&self, f: &mut core::fmt::Formatter) -> Result<(), core::fmt::Error> {
- (self.0).0.to_channel_id().fmt(f)
+ self.0.fmt(f)
}
}
macro_rules! log_funding_info {
($key_storage: expr) => {
- $crate::util::macro_logger::DebugFundingInfo(&$key_storage.get_funding_txo())
+ $crate::util::macro_logger::DebugFundingInfo(
+ &$key_storage.channel_id()
+ )
}
}
pub mod message_signing;
pub mod invoice;
pub mod persist;
+pub mod scid_utils;
pub mod string;
pub mod wakers;
#[cfg(fuzzing)]
pub(crate) mod atomic_counter;
pub(crate) mod byte_utils;
-pub(crate) mod chacha20;
-#[cfg(not(fuzzing))]
-pub(crate) mod poly1305;
-pub(crate) mod chacha20poly1305rfc;
pub(crate) mod transaction_utils;
-pub(crate) mod scid_utils;
pub(crate) mod time;
pub mod indexed_map;
#[macro_use]
pub(crate) mod macro_logger;
-/// Cryptography utilities.
-pub(crate) mod crypto;
-
// These have to come after macro_logger to build
pub mod logger;
pub mod config;
{
let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
let update_map = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap();
- let update_id = update_map.get(&added_monitors[0].0.to_channel_id()).unwrap();
+ let update_id = update_map.get(&added_monitors[0].1.channel_id()).unwrap();
let cmu_map = nodes[1].chain_monitor.monitor_updates.lock().unwrap();
- let cmu = &cmu_map.get(&added_monitors[0].0.to_channel_id()).unwrap()[0];
+ let cmu = &cmu_map.get(&added_monitors[0].1.channel_id()).unwrap()[0];
let test_txo = OutPoint { txid: Txid::from_str("8984484a580b825b9972d7adb15050b3ab624ccd731946b3eeddb92f4e7ef6be").unwrap(), index: 0 };
let ro_persister = MonitorUpdatingPersister {
+++ /dev/null
-// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
-// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
-// You may not use this file except in accordance with one or both of these
-// licenses.
-
-// This is a port of Andrew Moons poly1305-donna
-// https://github.com/floodyberry/poly1305-donna
-
-use core::cmp::min;
-use core::convert::TryInto;
-
-#[derive(Clone, Copy)]
-pub struct Poly1305 {
- r : [u32; 5],
- h : [u32; 5],
- pad : [u32; 4],
- leftover : usize,
- buffer : [u8; 16],
- finalized : bool,
-}
-
-impl Poly1305 {
- pub fn new(key: &[u8]) -> Poly1305 {
- assert!(key.len() == 32);
- let mut poly = Poly1305{ r: [0u32; 5], h: [0u32; 5], pad: [0u32; 4], leftover: 0, buffer: [0u8; 16], finalized: false };
-
- // r &= 0xffffffc0ffffffc0ffffffc0fffffff
- poly.r[0] = (u32::from_le_bytes(key[ 0.. 4].try_into().expect("len is 4")) ) & 0x3ffffff;
- poly.r[1] = (u32::from_le_bytes(key[ 3.. 7].try_into().expect("len is 4")) >> 2) & 0x3ffff03;
- poly.r[2] = (u32::from_le_bytes(key[ 6..10].try_into().expect("len is 4")) >> 4) & 0x3ffc0ff;
- poly.r[3] = (u32::from_le_bytes(key[ 9..13].try_into().expect("len is 4")) >> 6) & 0x3f03fff;
- poly.r[4] = (u32::from_le_bytes(key[12..16].try_into().expect("len is 4")) >> 8) & 0x00fffff;
-
- poly.pad[0] = u32::from_le_bytes(key[16..20].try_into().expect("len is 4"));
- poly.pad[1] = u32::from_le_bytes(key[20..24].try_into().expect("len is 4"));
- poly.pad[2] = u32::from_le_bytes(key[24..28].try_into().expect("len is 4"));
- poly.pad[3] = u32::from_le_bytes(key[28..32].try_into().expect("len is 4"));
-
- poly
- }
-
- fn block(&mut self, m: &[u8]) {
- let hibit : u32 = if self.finalized { 0 } else { 1 << 24 };
-
- let r0 = self.r[0];
- let r1 = self.r[1];
- let r2 = self.r[2];
- let r3 = self.r[3];
- let r4 = self.r[4];
-
- let s1 = r1 * 5;
- let s2 = r2 * 5;
- let s3 = r3 * 5;
- let s4 = r4 * 5;
-
- let mut h0 = self.h[0];
- let mut h1 = self.h[1];
- let mut h2 = self.h[2];
- let mut h3 = self.h[3];
- let mut h4 = self.h[4];
-
- // h += m
- h0 += (u32::from_le_bytes(m[ 0.. 4].try_into().expect("len is 4")) ) & 0x3ffffff;
- h1 += (u32::from_le_bytes(m[ 3.. 7].try_into().expect("len is 4")) >> 2) & 0x3ffffff;
- h2 += (u32::from_le_bytes(m[ 6..10].try_into().expect("len is 4")) >> 4) & 0x3ffffff;
- h3 += (u32::from_le_bytes(m[ 9..13].try_into().expect("len is 4")) >> 6) & 0x3ffffff;
- h4 += (u32::from_le_bytes(m[12..16].try_into().expect("len is 4")) >> 8) | hibit;
-
- // h *= r
- let d0 = (h0 as u64 * r0 as u64) + (h1 as u64 * s4 as u64) + (h2 as u64 * s3 as u64) + (h3 as u64 * s2 as u64) + (h4 as u64 * s1 as u64);
- let mut d1 = (h0 as u64 * r1 as u64) + (h1 as u64 * r0 as u64) + (h2 as u64 * s4 as u64) + (h3 as u64 * s3 as u64) + (h4 as u64 * s2 as u64);
- let mut d2 = (h0 as u64 * r2 as u64) + (h1 as u64 * r1 as u64) + (h2 as u64 * r0 as u64) + (h3 as u64 * s4 as u64) + (h4 as u64 * s3 as u64);
- let mut d3 = (h0 as u64 * r3 as u64) + (h1 as u64 * r2 as u64) + (h2 as u64 * r1 as u64) + (h3 as u64 * r0 as u64) + (h4 as u64 * s4 as u64);
- let mut d4 = (h0 as u64 * r4 as u64) + (h1 as u64 * r3 as u64) + (h2 as u64 * r2 as u64) + (h3 as u64 * r1 as u64) + (h4 as u64 * r0 as u64);
-
- // (partial) h %= p
- let mut c : u32;
- c = (d0 >> 26) as u32; h0 = d0 as u32 & 0x3ffffff;
- d1 += c as u64; c = (d1 >> 26) as u32; h1 = d1 as u32 & 0x3ffffff;
- d2 += c as u64; c = (d2 >> 26) as u32; h2 = d2 as u32 & 0x3ffffff;
- d3 += c as u64; c = (d3 >> 26) as u32; h3 = d3 as u32 & 0x3ffffff;
- d4 += c as u64; c = (d4 >> 26) as u32; h4 = d4 as u32 & 0x3ffffff;
- h0 += c * 5; c = h0 >> 26; h0 = h0 & 0x3ffffff;
- h1 += c;
-
- self.h[0] = h0;
- self.h[1] = h1;
- self.h[2] = h2;
- self.h[3] = h3;
- self.h[4] = h4;
- }
-
- pub fn finish(&mut self) {
- if self.leftover > 0 {
- self.buffer[self.leftover] = 1;
- for i in self.leftover+1..16 {
- self.buffer[i] = 0;
- }
- self.finalized = true;
- let tmp = self.buffer;
- self.block(&tmp);
- }
-
- // fully carry h
- let mut h0 = self.h[0];
- let mut h1 = self.h[1];
- let mut h2 = self.h[2];
- let mut h3 = self.h[3];
- let mut h4 = self.h[4];
-
- let mut c : u32;
- c = h1 >> 26; h1 = h1 & 0x3ffffff;
- h2 += c; c = h2 >> 26; h2 = h2 & 0x3ffffff;
- h3 += c; c = h3 >> 26; h3 = h3 & 0x3ffffff;
- h4 += c; c = h4 >> 26; h4 = h4 & 0x3ffffff;
- h0 += c * 5; c = h0 >> 26; h0 = h0 & 0x3ffffff;
- h1 += c;
-
- // compute h + -p
- let mut g0 = h0.wrapping_add(5); c = g0 >> 26; g0 &= 0x3ffffff;
- let mut g1 = h1.wrapping_add(c); c = g1 >> 26; g1 &= 0x3ffffff;
- let mut g2 = h2.wrapping_add(c); c = g2 >> 26; g2 &= 0x3ffffff;
- let mut g3 = h3.wrapping_add(c); c = g3 >> 26; g3 &= 0x3ffffff;
- let mut g4 = h4.wrapping_add(c).wrapping_sub(1 << 26);
-
- // select h if h < p, or h + -p if h >= p
- let mut mask = (g4 >> (32 - 1)).wrapping_sub(1);
- g0 &= mask;
- g1 &= mask;
- g2 &= mask;
- g3 &= mask;
- g4 &= mask;
- mask = !mask;
- h0 = (h0 & mask) | g0;
- h1 = (h1 & mask) | g1;
- h2 = (h2 & mask) | g2;
- h3 = (h3 & mask) | g3;
- h4 = (h4 & mask) | g4;
-
- // h = h % (2^128)
- h0 = ((h0 ) | (h1 << 26)) & 0xffffffff;
- h1 = ((h1 >> 6) | (h2 << 20)) & 0xffffffff;
- h2 = ((h2 >> 12) | (h3 << 14)) & 0xffffffff;
- h3 = ((h3 >> 18) | (h4 << 8)) & 0xffffffff;
-
- // h = mac = (h + pad) % (2^128)
- let mut f : u64;
- f = h0 as u64 + self.pad[0] as u64 ; h0 = f as u32;
- f = h1 as u64 + self.pad[1] as u64 + (f >> 32); h1 = f as u32;
- f = h2 as u64 + self.pad[2] as u64 + (f >> 32); h2 = f as u32;
- f = h3 as u64 + self.pad[3] as u64 + (f >> 32); h3 = f as u32;
-
- self.h[0] = h0;
- self.h[1] = h1;
- self.h[2] = h2;
- self.h[3] = h3;
- }
-
- pub fn input(&mut self, data: &[u8]) {
- assert!(!self.finalized);
- let mut m = data;
-
- if self.leftover > 0 {
- let want = min(16 - self.leftover, m.len());
- for i in 0..want {
- self.buffer[self.leftover+i] = m[i];
- }
- m = &m[want..];
- self.leftover += want;
-
- if self.leftover < 16 {
- return;
- }
-
- // self.block(self.buffer[..]);
- let tmp = self.buffer;
- self.block(&tmp);
-
- self.leftover = 0;
- }
-
- while m.len() >= 16 {
- self.block(&m[0..16]);
- m = &m[16..];
- }
-
- for i in 0..m.len() {
- self.buffer[i] = m[i];
- }
- self.leftover = m.len();
- }
-
- pub fn raw_result(&mut self, output: &mut [u8]) {
- assert!(output.len() >= 16);
- if !self.finalized{
- self.finish();
- }
- output[0..4].copy_from_slice(&self.h[0].to_le_bytes());
- output[4..8].copy_from_slice(&self.h[1].to_le_bytes());
- output[8..12].copy_from_slice(&self.h[2].to_le_bytes());
- output[12..16].copy_from_slice(&self.h[3].to_le_bytes());
- }
-}
-
-#[cfg(test)]
-mod test {
- use crate::prelude::*;
- use core::iter::repeat;
-
- use crate::util::poly1305::Poly1305;
-
- fn poly1305(key: &[u8], msg: &[u8], mac: &mut [u8]) {
- let mut poly = Poly1305::new(key);
- poly.input(msg);
- poly.raw_result(mac);
- }
-
- #[test]
- fn test_nacl_vector() {
- let key = [
- 0xee,0xa6,0xa7,0x25,0x1c,0x1e,0x72,0x91,
- 0x6d,0x11,0xc2,0xcb,0x21,0x4d,0x3c,0x25,
- 0x25,0x39,0x12,0x1d,0x8e,0x23,0x4e,0x65,
- 0x2d,0x65,0x1f,0xa4,0xc8,0xcf,0xf8,0x80,
- ];
-
- let msg = [
- 0x8e,0x99,0x3b,0x9f,0x48,0x68,0x12,0x73,
- 0xc2,0x96,0x50,0xba,0x32,0xfc,0x76,0xce,
- 0x48,0x33,0x2e,0xa7,0x16,0x4d,0x96,0xa4,
- 0x47,0x6f,0xb8,0xc5,0x31,0xa1,0x18,0x6a,
- 0xc0,0xdf,0xc1,0x7c,0x98,0xdc,0xe8,0x7b,
- 0x4d,0xa7,0xf0,0x11,0xec,0x48,0xc9,0x72,
- 0x71,0xd2,0xc2,0x0f,0x9b,0x92,0x8f,0xe2,
- 0x27,0x0d,0x6f,0xb8,0x63,0xd5,0x17,0x38,
- 0xb4,0x8e,0xee,0xe3,0x14,0xa7,0xcc,0x8a,
- 0xb9,0x32,0x16,0x45,0x48,0xe5,0x26,0xae,
- 0x90,0x22,0x43,0x68,0x51,0x7a,0xcf,0xea,
- 0xbd,0x6b,0xb3,0x73,0x2b,0xc0,0xe9,0xda,
- 0x99,0x83,0x2b,0x61,0xca,0x01,0xb6,0xde,
- 0x56,0x24,0x4a,0x9e,0x88,0xd5,0xf9,0xb3,
- 0x79,0x73,0xf6,0x22,0xa4,0x3d,0x14,0xa6,
- 0x59,0x9b,0x1f,0x65,0x4c,0xb4,0x5a,0x74,
- 0xe3,0x55,0xa5,
- ];
-
- let expected = [
- 0xf3,0xff,0xc7,0x70,0x3f,0x94,0x00,0xe5,
- 0x2a,0x7d,0xfb,0x4b,0x3d,0x33,0x05,0xd9,
- ];
-
- let mut mac = [0u8; 16];
- poly1305(&key, &msg, &mut mac);
- assert_eq!(&mac[..], &expected[..]);
-
- let mut poly = Poly1305::new(&key);
- poly.input(&msg[0..32]);
- poly.input(&msg[32..96]);
- poly.input(&msg[96..112]);
- poly.input(&msg[112..120]);
- poly.input(&msg[120..124]);
- poly.input(&msg[124..126]);
- poly.input(&msg[126..127]);
- poly.input(&msg[127..128]);
- poly.input(&msg[128..129]);
- poly.input(&msg[129..130]);
- poly.input(&msg[130..131]);
- poly.raw_result(&mut mac);
- assert_eq!(&mac[..], &expected[..]);
- }
-
- #[test]
- fn donna_self_test() {
- let wrap_key = [
- 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- ];
-
- let wrap_msg = [
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- ];
-
- let wrap_mac = [
- 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- ];
-
- let mut mac = [0u8; 16];
- poly1305(&wrap_key, &wrap_msg, &mut mac);
- assert_eq!(&mac[..], &wrap_mac[..]);
-
- let total_key = [
- 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0xff,
- 0xfe, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
- ];
-
- let total_mac = [
- 0x64, 0xaf, 0xe2, 0xe8, 0xd6, 0xad, 0x7b, 0xbd,
- 0xd2, 0x87, 0xf9, 0x7c, 0x44, 0x62, 0x3d, 0x39,
- ];
-
- let mut tpoly = Poly1305::new(&total_key);
- for i in 0..256 {
- let key: Vec<u8> = repeat(i as u8).take(32).collect();
- let msg: Vec<u8> = repeat(i as u8).take(256).collect();
- let mut mac = [0u8; 16];
- poly1305(&key[..], &msg[0..i], &mut mac);
- tpoly.input(&mac);
- }
- tpoly.raw_result(&mut mac);
- assert_eq!(&mac[..], &total_mac[..]);
- }
-
- #[test]
- fn test_tls_vectors() {
- // from http://tools.ietf.org/html/draft-agl-tls-chacha20poly1305-04
- let key = b"this is 32-byte key for Poly1305";
- let msg = [0u8; 32];
- let expected = [
- 0x49, 0xec, 0x78, 0x09, 0x0e, 0x48, 0x1e, 0xc6,
- 0xc2, 0x6b, 0x33, 0xb9, 0x1c, 0xcc, 0x03, 0x07,
- ];
- let mut mac = [0u8; 16];
- poly1305(key, &msg, &mut mac);
- assert_eq!(&mac[..], &expected[..]);
-
- let msg = b"Hello world!";
- let expected= [
- 0xa6, 0xf7, 0x45, 0x00, 0x8f, 0x81, 0xc9, 0x16,
- 0xa2, 0x0d, 0xcc, 0x74, 0xee, 0xf2, 0xb2, 0xf0,
- ];
- poly1305(key, msg, &mut mac);
- assert_eq!(&mac[..], &expected[..]);
- }
-}
// You may not use this file except in accordance with one or both of these
// licenses.
+//! Utilities for creating and parsing short channel ids.
+
/// Maximum block height that can be used in a `short_channel_id`. This
/// value is based on the 3-bytes available for block height.
pub const MAX_SCID_BLOCK: u64 = 0x00ffffff;
/// A `short_channel_id` construction error
#[derive(Debug, PartialEq, Eq)]
pub enum ShortChannelIdError {
+ /// Block height too high
BlockOverflow,
+ /// Tx index too high
TxIndexOverflow,
+ /// Vout index too high
VoutIndexOverflow,
}
use bitcoin::blockdata::constants::ChainHash;
use bitcoin::network::constants::Network;
use crate::sign::EntropySource;
- use crate::util::chacha20::ChaCha20;
+ use crate::crypto::chacha20::ChaCha20;
use crate::util::scid_utils;
use core::convert::TryInto;
/// into the fake scid.
#[derive(Copy, Clone)]
pub(crate) enum Namespace {
+ /// Phantom nodes namespace
Phantom,
+ /// SCID aliases for outbound private channels
OutboundAlias,
+ /// Payment interception namespace
Intercept
}
use crate::ln::script::ShutdownScript;
use crate::offers::invoice::{BlindedPayInfo, UnsignedBolt12Invoice};
use crate::offers::invoice_request::UnsignedInvoiceRequest;
-use crate::onion_message::{Destination, MessageRouter, OnionMessagePath};
+use crate::onion_message::messenger::{DefaultMessageRouter, Destination, MessageRouter, OnionMessagePath};
use crate::routing::gossip::{EffectiveCapacity, NetworkGraph, NodeId, RoutingFees};
use crate::routing::utxo::{UtxoLookup, UtxoLookupError, UtxoResult};
-use crate::routing::router::{find_route, InFlightHtlcs, Path, Route, RouteParameters, RouteHintHop, Router, ScorerAccountingForInFlightHtlcs};
+use crate::routing::router::{DefaultRouter, InFlightHtlcs, Path, Route, RouteParameters, RouteHintHop, Router, ScorerAccountingForInFlightHtlcs};
use crate::routing::scoring::{ChannelUsage, ScoreUpdate, ScoreLookUp};
use crate::sync::RwLock;
use crate::util::config::UserConfig;
}
pub struct TestRouter<'a> {
+ pub router: DefaultRouter<
+ Arc<NetworkGraph<&'a TestLogger>>,
+ &'a TestLogger,
+ &'a RwLock<TestScorer>,
+ (),
+ TestScorer,
+ >,
pub network_graph: Arc<NetworkGraph<&'a TestLogger>>,
pub next_routes: Mutex<VecDeque<(RouteParameters, Result<Route, LightningError>)>>,
pub scorer: &'a RwLock<TestScorer>,
}
impl<'a> TestRouter<'a> {
- pub fn new(network_graph: Arc<NetworkGraph<&'a TestLogger>>, scorer: &'a RwLock<TestScorer>) -> Self {
- Self { network_graph, next_routes: Mutex::new(VecDeque::new()), scorer }
+ pub fn new(
+ network_graph: Arc<NetworkGraph<&'a TestLogger>>, logger: &'a TestLogger,
+ scorer: &'a RwLock<TestScorer>
+ ) -> Self {
+ Self {
+ router: DefaultRouter::new(network_graph.clone(), logger, [42u8; 32], scorer, ()),
+ network_graph,
+ next_routes: Mutex::new(VecDeque::new()),
+ scorer,
+ }
}
pub fn expect_find_route(&self, query: RouteParameters, result: Result<Route, LightningError>) {
}
return find_route_res;
}
- let logger = TestLogger::new();
- find_route(
- payer, params, &self.network_graph, first_hops, &logger,
- &ScorerAccountingForInFlightHtlcs::new(self.scorer.read().unwrap(), &inflight_htlcs), &Default::default(),
- &[42; 32]
- )
+
+ self.router.find_route(payer, params, first_hops, inflight_htlcs)
}
fn create_blinded_payment_paths<
ES: EntropySource + ?Sized, T: secp256k1::Signing + secp256k1::Verification
>(
- &self, _recipient: PublicKey, _first_hops: Vec<ChannelDetails>, _tlvs: ReceiveTlvs,
- _amount_msats: u64, _entropy_source: &ES, _secp_ctx: &Secp256k1<T>
+ &self, recipient: PublicKey, first_hops: Vec<ChannelDetails>, tlvs: ReceiveTlvs,
+ amount_msats: u64, entropy_source: &ES, secp_ctx: &Secp256k1<T>
) -> Result<Vec<(BlindedPayInfo, BlindedPath)>, ()> {
- unreachable!()
+ self.router.create_blinded_payment_paths(
+ recipient, first_hops, tlvs, amount_msats, entropy_source, secp_ctx
+ )
}
}
impl<'a> MessageRouter for TestRouter<'a> {
fn find_path(
- &self, _sender: PublicKey, _peers: Vec<PublicKey>, _destination: Destination
+ &self, sender: PublicKey, peers: Vec<PublicKey>, destination: Destination
) -> Result<OnionMessagePath, ()> {
- unreachable!()
+ self.router.find_path(sender, peers, destination)
}
fn create_blinded_paths<
ES: EntropySource + ?Sized, T: secp256k1::Signing + secp256k1::Verification
>(
- &self, _recipient: PublicKey, _peers: Vec<PublicKey>, _entropy_source: &ES,
- _secp_ctx: &Secp256k1<T>
+ &self, recipient: PublicKey, peers: Vec<PublicKey>, entropy_source: &ES,
+ secp_ctx: &Secp256k1<T>
) -> Result<Vec<BlindedPath>, ()> {
- unreachable!()
+ self.router.create_blinded_paths(recipient, peers, entropy_source, secp_ctx)
}
}
}
}
+pub struct TestMessageRouter<'a> {
+ inner: DefaultMessageRouter<Arc<NetworkGraph<&'a TestLogger>>, &'a TestLogger>,
+}
+
+impl<'a> TestMessageRouter<'a> {
+ pub fn new(network_graph: Arc<NetworkGraph<&'a TestLogger>>) -> Self {
+ Self { inner: DefaultMessageRouter::new(network_graph) }
+ }
+}
+
+impl<'a> MessageRouter for TestMessageRouter<'a> {
+ fn find_path(
+ &self, sender: PublicKey, peers: Vec<PublicKey>, destination: Destination
+ ) -> Result<OnionMessagePath, ()> {
+ self.inner.find_path(sender, peers, destination)
+ }
+
+ fn create_blinded_paths<
+ ES: EntropySource + ?Sized, T: secp256k1::Signing + secp256k1::Verification
+ >(
+ &self, recipient: PublicKey, peers: Vec<PublicKey>, entropy_source: &ES,
+ secp_ctx: &Secp256k1<T>
+ ) -> Result<Vec<BlindedPath>, ()> {
+ self.inner.create_blinded_paths(recipient, peers, entropy_source, secp_ctx)
+ }
+}
+
pub struct OnlyReadsKeysInterface {}
impl EntropySource for OnlyReadsKeysInterface {
let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<TestChannelSigner>)>::read(
&mut io::Cursor::new(&w.0), (self.keys_manager, self.keys_manager)).unwrap().1;
assert!(new_monitor == monitor);
- self.latest_monitor_update_id.lock().unwrap().insert(funding_txo.to_channel_id(),
+ self.latest_monitor_update_id.lock().unwrap().insert(monitor.channel_id(),
(funding_txo, monitor.get_latest_update_id(), MonitorUpdateId::from_new_monitor(&monitor)));
self.added_monitors.lock().unwrap().push((funding_txo, monitor));
self.chain_monitor.watch_channel(funding_txo, new_monitor)
update.write(&mut w).unwrap();
assert!(channelmonitor::ChannelMonitorUpdate::read(
&mut io::Cursor::new(&w.0)).unwrap() == *update);
+ let channel_id = update.channel_id.unwrap_or(ChannelId::v1_from_funding_outpoint(funding_txo));
- self.monitor_updates.lock().unwrap().entry(funding_txo.to_channel_id()).or_insert(Vec::new()).push(update.clone());
+ self.monitor_updates.lock().unwrap().entry(channel_id).or_insert(Vec::new()).push(update.clone());
if let Some(exp) = self.expect_channel_force_closed.lock().unwrap().take() {
- assert_eq!(funding_txo.to_channel_id(), exp.0);
+ assert_eq!(channel_id, exp.0);
assert_eq!(update.updates.len(), 1);
if let channelmonitor::ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast } = update.updates[0] {
assert_eq!(should_broadcast, exp.1);
} else { panic!(); }
}
- self.latest_monitor_update_id.lock().unwrap().insert(funding_txo.to_channel_id(),
+ self.latest_monitor_update_id.lock().unwrap().insert(channel_id,
(funding_txo, update.update_id, MonitorUpdateId::from_monitor_update(update)));
let update_res = self.chain_monitor.update_channel(funding_txo, update);
// At every point where we get a monitor update, we should be able to send a useful monitor
let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<TestChannelSigner>)>::read(
&mut io::Cursor::new(&w.0), (self.keys_manager, self.keys_manager)).unwrap().1;
if let Some(chan_id) = self.expect_monitor_round_trip_fail.lock().unwrap().take() {
- assert_eq!(chan_id, funding_txo.to_channel_id());
+ assert_eq!(chan_id, channel_id);
assert!(new_monitor != *monitor);
} else {
assert!(new_monitor == *monitor);
update_res
}
- fn release_pending_monitor_events(&self) -> Vec<(OutPoint, Vec<MonitorEvent>, Option<PublicKey>)> {
+ fn release_pending_monitor_events(&self) -> Vec<(OutPoint, ChannelId, Vec<MonitorEvent>, Option<PublicKey>)> {
return self.chain_monitor.release_pending_monitor_events();
}
}
fn time_passed(&mut self, _duration_since_epoch: Duration) {}
}
+#[cfg(c_bindings)]
+impl crate::routing::scoring::Score for TestScorer {}
+
impl Drop for TestScorer {
fn drop(&mut self) {
#[cfg(feature = "std")] {
}
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
-#[cfg(not(feature = "no-std"))]
+#[cfg(feature = "std")]
pub struct MonotonicTime(std::time::Instant);
/// The amount of time to shift `Instant` forward to prevent overflow when subtracting a `Duration`
/// from `Instant::now` on some operating systems (e.g., iOS representing `Instance` as `u64`).
-#[cfg(not(feature = "no-std"))]
+#[cfg(feature = "std")]
const SHIFT: Duration = Duration::from_secs(10 * 365 * 24 * 60 * 60); // 10 years.
-#[cfg(not(feature = "no-std"))]
+#[cfg(feature = "std")]
impl Time for MonotonicTime {
fn now() -> Self {
let instant = std::time::Instant::now().checked_add(SHIFT).expect("Overflow on MonotonicTime instantiation");
}
}
-#[cfg(not(feature = "no-std"))]
+#[cfg(feature = "std")]
impl Sub<Duration> for MonotonicTime {
type Output = Self;
}
#[test]
- #[cfg(not(feature = "no-std"))]
+ #[cfg(feature = "std")]
fn monotonic_time_subtracts() {
let now = super::MonotonicTime::now();
assert!(now.elapsed() < Duration::from_secs(10));
}
// Rather annoyingly, there's no safe way in Rust std to construct a Waker despite it being
- // totally possible to construct from a trait implementation (though somewhat less effecient
+ // totally possible to construct from a trait implementation (though somewhat less efficient
// compared to a raw VTable). Instead, we have to write out a lot of boilerplate to build a
// waker, which we do here with a trivial Arc<AtomicBool> data element to track woke-ness.
const WAKER_V_TABLE: RawWakerVTable = RawWakerVTable::new(waker_clone, wake, wake_by_ref, drop);