--- /dev/null
+name: Security Audit
+on:
+ workflow_dispatch:
+ schedule:
+ - cron: '0 0 * * *'
+
+jobs:
+ audit:
+ runs-on: ubuntu-latest
+ permissions:
+ issues: write
+ checks: write
+ steps:
+ - uses: actions/checkout@v3
+ - uses: rustsec/audit-check@v1.4.1
+ with:
+ token: ${{ secrets.GITHUB_TOKEN }}
+ ignore: "RUSTSEC-2021-0145"
+          # RUSTSEC-2021-0145 pertains to `atty`, which is a dependency of
+          # `criterion`. While the latter removed the dependency in its
+ # newest version, it would also require a higher `rustc`. We
+ # therefore avoid bumping it to allow benchmarking with our
+ # `rustc` 1.63 MSRV.
fuzz:
runs-on: ubuntu-latest
env:
- TOOLCHAIN: 1.58
+ TOOLCHAIN: 1.63
steps:
- name: Checkout source code
uses: actions/checkout@v3
run: |
sudo apt-get update
sudo apt-get -y install build-essential binutils-dev libunwind-dev
+ - name: Pin the regex dependency
+ run: |
+ cd fuzz && cargo update -p regex --precise "1.9.6" --verbose && cd ..
+ cd lightning-invoice/fuzz && cargo update -p regex --precise "1.9.6" --verbose
- name: Sanity check fuzz targets on Rust ${{ env.TOOLCHAIN }}
run: cd fuzz && RUSTFLAGS="--cfg=fuzzing" cargo test --verbose --color always
- name: Run fuzzers
- name: Run default clippy linting
run: |
cargo clippy -- -Aclippy::erasing_op -Aclippy::never_loop -Aclippy::if_same_then_else -Dclippy::try_err
+
+ rustfmt:
+ runs-on: ubuntu-latest
+ env:
+ TOOLCHAIN: 1.63.0
+ steps:
+ - name: Checkout source code
+ uses: actions/checkout@v3
+ - name: Install Rust ${{ env.TOOLCHAIN }} toolchain
+ run: |
+ curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --profile=minimal --default-toolchain ${{ env.TOOLCHAIN }}
+ rustup override set ${{ env.TOOLCHAIN }}
+ - name: Install rustfmt
+ run: |
+ rustup component add rustfmt
+ - name: Run rustfmt checks
+ run: ci/rustfmt.sh
lightning-custom-message/target
lightning-transaction-sync/target
no-std-check/target
+msrv-no-dev-deps-check/target
------------------
Use tabs. If you want to align lines, use spaces. Any desired alignment should
-display fine at any tab-length display setting.
+display fine at any tab-length display setting. We use `rustfmt` to establish
+uniform coding standards throughout the codebase. Please run
+
+```bash
+./ci/rustfmt.sh
+```
+
+before committing and pushing any changes, as compliance will also be checked
+and enforced by our CI scripts.
Our CI enforces [clippy's](https://github.com/rust-lang/rust-clippy) default
linting
"lightning-background-processor",
"lightning-rapid-gossip-sync",
"lightning-custom-message",
+ "lightning-transaction-sync",
+ "possiblyrandom",
]
exclude = [
- "lightning-transaction-sync",
"no-std-check",
"msrv-no-dev-deps-check",
"bench",
opt-level = 3
lto = true
panic = "abort"
+
+[patch.crates-io.possiblyrandom]
+path = "possiblyrandom"
[![Crate](https://img.shields.io/crates/v/lightning.svg?logo=rust)](https://crates.io/crates/lightning)
[![Documentation](https://img.shields.io/static/v1?logo=read-the-docs&label=docs.rs&message=lightning&color=informational)](https://docs.rs/lightning/)
[![Safety Dance](https://img.shields.io/badge/unsafe-forbidden-success.svg)](https://github.com/rust-secure-code/safety-dance/)
+[![Security Audit](https://github.com/lightningdevkit/rust-lightning/actions/workflows/audit.yml/badge.svg)](https://github.com/lightningdevkit/rust-lightning/actions/workflows/audit.yml)
-[LDK](https://lightningdevkit.org)/`rust-lightning` is a highly performant and flexible
+[LDK](https://lightningdevkit.org)/`rust-lightning` is a highly performant and flexible
implementation of the Lightning Network protocol.
The primary crate, `lightning`, is runtime-agnostic. Data persistence, chain interactions,
pass
elif feature == "no-std":
pass
+ elif feature == "possiblyrandom":
+ pass
+ elif feature == "getrandom":
+ pass
elif feature == "hashbrown":
pass
elif feature == "backtrace":
pass
elif cfg == "require_route_graph_test":
pass
+ elif cfg == "dual_funding":
+ pass
+ elif cfg == "splicing":
+ pass
else:
print("Bad cfg tag: " + cfg)
assert False
# The addr2line v0.21 crate (a dependency of `backtrace` starting with 0.3.69) relies on rustc 1.65
[ "$RUSTC_MINOR_VERSION" -lt 65 ] && cargo update -p backtrace --precise "0.3.68" --verbose
+# Starting with version 0.5.9 (there is no .6-.8), the `home` crate has an MSRV of rustc 1.70.0.
+[ "$RUSTC_MINOR_VERSION" -lt 70 ] && cargo update -p home --precise "0.5.5" --verbose
+
export RUST_BACKTRACE=1
+# Build `lightning-transaction-sync` in no_download mode.
+export RUSTFLAGS="$RUSTFLAGS --cfg no_download"
+
echo -e "\n\nBuilding and testing all workspace crates..."
cargo test --verbose --color always
cargo check --verbose --color always
echo -e "\n\nBuilding and testing Transaction Sync Clients with features"
pushd lightning-transaction-sync
- # reqwest 0.11.21 had a regression that broke its 1.63.0 MSRV
- [ "$RUSTC_MINOR_VERSION" -lt 65 ] && cargo update -p reqwest --precise "0.11.20" --verbose
- # Starting with version 1.10.0, the `regex` crate has an MSRV of rustc 1.65.0.
- [ "$RUSTC_MINOR_VERSION" -lt 65 ] && cargo update -p regex --precise "1.9.6" --verbose
- # Starting with version 0.5.9 (there is no .6-.8), the `home` crate has an MSRV of rustc 1.70.0.
- [ "$RUSTC_MINOR_VERSION" -lt 70 ] && cargo update -p home --precise "0.5.5" --verbose
-
DOWNLOAD_ELECTRS_AND_BITCOIND
- RUSTFLAGS="$RUSTFLAGS --cfg no_download" cargo test --verbose --color always --features esplora-blocking
- RUSTFLAGS="$RUSTFLAGS --cfg no_download" cargo check --verbose --color always --features esplora-blocking
- RUSTFLAGS="$RUSTFLAGS --cfg no_download" cargo test --verbose --color always --features esplora-async
- RUSTFLAGS="$RUSTFLAGS --cfg no_download" cargo check --verbose --color always --features esplora-async
- RUSTFLAGS="$RUSTFLAGS --cfg no_download" cargo test --verbose --color always --features esplora-async-https
- RUSTFLAGS="$RUSTFLAGS --cfg no_download" cargo check --verbose --color always --features esplora-async-https
- RUSTFLAGS="$RUSTFLAGS --cfg no_download" cargo test --verbose --color always --features electrum
- RUSTFLAGS="$RUSTFLAGS --cfg no_download" cargo check --verbose --color always --features electrum
-
+ cargo test --verbose --color always --features esplora-blocking
+ cargo check --verbose --color always --features esplora-blocking
+ cargo test --verbose --color always --features esplora-async
+ cargo check --verbose --color always --features esplora-async
+ cargo test --verbose --color always --features esplora-async-https
+ cargo check --verbose --color always --features esplora-async-https
+ cargo test --verbose --color always --features electrum
+ cargo check --verbose --color always --features electrum
popd
fi
echo -e "\n\nTest cfg-flag builds"
RUSTFLAGS="--cfg=taproot" cargo test --verbose --color always -p lightning
+[ "$CI_MINIMIZE_DISK_USAGE" != "" ] && cargo clean
RUSTFLAGS="--cfg=async_signing" cargo test --verbose --color always -p lightning
+[ "$CI_MINIMIZE_DISK_USAGE" != "" ] && cargo clean
+RUSTFLAGS="--cfg=dual_funding" cargo test --verbose --color always -p lightning
+[ "$CI_MINIMIZE_DISK_USAGE" != "" ] && cargo clean
+RUSTFLAGS="--cfg=splicing" cargo test --verbose --color always -p lightning
--- /dev/null
+#!/bin/bash
+set -eox pipefail
+
+# Generate initial exclusion list
+#find . -name '*.rs' -type f |sort >rustfmt_excluded_files
+
+# The +rustversion syntax only works with rustup-installed rust toolchains,
+# not with any distro-provided ones. Thus, we check for a rustup install and
+# only pass +1.63.0 if we find one.
+VERS=""
+[ "$(which rustup)" != "" ] && VERS="+1.63.0"
+
+# Run fmt
+TMP_FILE=$(mktemp)
+find . -name '*.rs' -type f |sort >$TMP_FILE
+for file in $(comm -23 $TMP_FILE rustfmt_excluded_files); do
+ echo "Checking formatting of $file"
+ rustfmt $VERS --check $file
+done
lightning-rapid-gossip-sync = { path = "../lightning-rapid-gossip-sync" }
bitcoin = { version = "0.30.2", features = ["secp-lowmemory"] }
hex = { package = "hex-conservative", version = "0.1.1", default-features = false }
-hashbrown = "0.8"
afl = { version = "0.12", optional = true }
honggfuzz = { version = "0.5", optional = true, default-features = false }
use lightning::sign::{KeyMaterial, InMemorySigner, Recipient, EntropySource, NodeSigner, SignerProvider};
use lightning::events;
use lightning::events::MessageSendEventsProvider;
-use lightning::ln::{PaymentHash, PaymentPreimage, PaymentSecret};
+use lightning::ln::{ChannelId, PaymentHash, PaymentPreimage, PaymentSecret};
use lightning::ln::channelmanager::{ChainParameters, ChannelDetails, ChannelManager, PaymentSendFailure, ChannelManagerReadArgs, PaymentId, RecipientOnionFields};
use lightning::ln::channel::FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
use lightning::ln::msgs::{self, CommitmentUpdate, ChannelMessageHandler, DecodeError, UpdateAddHTLC, Init};
use lightning::onion_message::messenger::{Destination, MessageRouter, OnionMessagePath};
use lightning::util::test_channel_signer::{TestChannelSigner, EnforcementState};
use lightning::util::errors::APIError;
+use lightning::util::hash_tables::*;
use lightning::util::logger::Logger;
use lightning::util::config::UserConfig;
use lightning::util::ser::{Readable, ReadableArgs, Writeable, Writer};
use std::mem;
use std::cmp::{self, Ordering};
-use hashbrown::{HashSet, hash_map, HashMap};
use std::sync::{Arc,Mutex};
use std::sync::atomic;
use std::io::Cursor;
// Background feerate which is <= the minimum Normal feerate.
match conf_target {
ConfirmationTarget::OnChainSweep => MAX_FEE,
- ConfirmationTarget::ChannelCloseMinimum|ConfirmationTarget::AnchorChannelFee|ConfirmationTarget::MinAllowedAnchorChannelRemoteFee|ConfirmationTarget::MinAllowedNonAnchorChannelRemoteFee => 253,
+ ConfirmationTarget::ChannelCloseMinimum|ConfirmationTarget::AnchorChannelFee|ConfirmationTarget::MinAllowedAnchorChannelRemoteFee|ConfirmationTarget::MinAllowedNonAnchorChannelRemoteFee|ConfirmationTarget::OutputSpendingFee => 253,
ConfirmationTarget::NonAnchorChannelFee => cmp::min(self.ret_val.load(atomic::Ordering::Acquire), MAX_FEE),
}
}
})
}
- fn create_blinded_payment_paths<
- ES: EntropySource + ?Sized, T: secp256k1::Signing + secp256k1::Verification
- >(
+ fn create_blinded_payment_paths<T: secp256k1::Signing + secp256k1::Verification>(
&self, _recipient: PublicKey, _first_hops: Vec<ChannelDetails>, _tlvs: ReceiveTlvs,
- _amount_msats: u64, _entropy_source: &ES, _secp_ctx: &Secp256k1<T>
+ _amount_msats: u64, _secp_ctx: &Secp256k1<T>,
) -> Result<Vec<(BlindedPayInfo, BlindedPath)>, ()> {
unreachable!()
}
unreachable!()
}
- fn create_blinded_paths<
- ES: EntropySource + ?Sized, T: secp256k1::Signing + secp256k1::Verification
- >(
- &self, _recipient: PublicKey, _peers: Vec<PublicKey>, _entropy_source: &ES,
- _secp_ctx: &Secp256k1<T>
+ fn create_blinded_paths<T: secp256k1::Signing + secp256k1::Verification>(
+ &self, _recipient: PublicKey, _peers: Vec<PublicKey>, _secp_ctx: &Secp256k1<T>,
) -> Result<Vec<BlindedPath>, ()> {
unreachable!()
}
logger,
keys,
persister,
- latest_monitors: Mutex::new(HashMap::new()),
+ latest_monitors: Mutex::new(new_hash_map()),
}
}
}
fn update_channel(&self, funding_txo: OutPoint, update: &channelmonitor::ChannelMonitorUpdate) -> chain::ChannelMonitorUpdateStatus {
let mut map_lock = self.latest_monitors.lock().unwrap();
- let mut map_entry = match map_lock.entry(funding_txo) {
- hash_map::Entry::Occupied(entry) => entry,
- hash_map::Entry::Vacant(_) => panic!("Didn't have monitor on update call"),
- };
+ let map_entry = map_lock.get_mut(&funding_txo).expect("Didn't have monitor on update call");
let deserialized_monitor = <(BlockHash, channelmonitor::ChannelMonitor<TestChannelSigner>)>::
- read(&mut Cursor::new(&map_entry.get().1), (&*self.keys, &*self.keys)).unwrap().1;
+ read(&mut Cursor::new(&map_entry.1), (&*self.keys, &*self.keys)).unwrap().1;
deserialized_monitor.update_monitor(update, &&TestBroadcaster{}, &&FuzzEstimator { ret_val: atomic::AtomicU32::new(253) }, &self.logger).unwrap();
let mut ser = VecWriter(Vec::new());
deserialized_monitor.write(&mut ser).unwrap();
- map_entry.insert((update.update_id, ser.0));
+ *map_entry = (update.update_id, ser.0);
self.chain_monitor.update_channel(funding_txo, update)
}
- fn release_pending_monitor_events(&self) -> Vec<(OutPoint, Vec<MonitorEvent>, Option<PublicKey>)> {
+ fn release_pending_monitor_events(&self) -> Vec<(OutPoint, ChannelId, Vec<MonitorEvent>, Option<PublicKey>)> {
return self.chain_monitor.release_pending_monitor_events();
}
}
($node_id: expr, $fee_estimator: expr) => { {
let logger: Arc<dyn Logger> = Arc::new(test_logger::TestLogger::new($node_id.to_string(), out.clone()));
let node_secret = SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, $node_id]).unwrap();
- let keys_manager = Arc::new(KeyProvider { node_secret, rand_bytes_id: atomic::AtomicU32::new(0), enforcement_states: Mutex::new(HashMap::new()) });
+ let keys_manager = Arc::new(KeyProvider { node_secret, rand_bytes_id: atomic::AtomicU32::new(0), enforcement_states: Mutex::new(new_hash_map()) });
let monitor = Arc::new(TestChainMonitor::new(broadcast.clone(), logger.clone(), $fee_estimator.clone(),
Arc::new(TestPersister {
update_ret: Mutex::new(ChannelMonitorUpdateStatus::Completed)
config.manually_accept_inbound_channels = true;
}
- let mut monitors = HashMap::new();
+ let mut monitors = new_hash_map();
let mut old_monitors = $old_monitors.latest_monitors.lock().unwrap();
for (outpoint, (update_id, monitor_ser)) in old_monitors.drain() {
monitors.insert(outpoint, <(BlockHash, ChannelMonitor<TestChannelSigner>)>::read(&mut Cursor::new(&monitor_ser), (&*$keys_manager, &*$keys_manager)).expect("Failed to read monitor").1);
chain_monitor.latest_monitors.lock().unwrap().insert(outpoint, (update_id, monitor_ser));
}
- let mut monitor_refs = HashMap::new();
+ let mut monitor_refs = new_hash_map();
for (outpoint, monitor) in monitors.iter_mut() {
monitor_refs.insert(*outpoint, monitor);
}
// In case we get 256 payments we may have a hash collision, resulting in the
// second claim/fail call not finding the duplicate-hash HTLC, so we have to
// deduplicate the calls here.
- let mut claim_set = HashSet::new();
+ let mut claim_set = new_hash_map();
let mut events = nodes[$node].get_and_clear_pending_events();
// Sort events so that PendingHTLCsForwardable get processed last. This avoids a
// case where we first process a PendingHTLCsForwardable, then claim/fail on a
for event in events.drain(..) {
match event {
events::Event::PaymentClaimable { payment_hash, .. } => {
- if claim_set.insert(payment_hash.0) {
+ if claim_set.insert(payment_hash.0, ()).is_none() {
if $fail {
nodes[$node].fail_htlc_backwards(&payment_hash);
} else {
use bitcoin::network::constants::Network;
use bitcoin::hashes::hex::FromHex;
-use bitcoin::hashes::Hash as TraitImport;
-use bitcoin::hashes::HashEngine as TraitImportEngine;
+use bitcoin::hashes::Hash as _;
use bitcoin::hashes::sha256::Hash as Sha256;
use bitcoin::hashes::sha256d::Hash as Sha256dHash;
use bitcoin::hash_types::{Txid, BlockHash, WPubkeyHash};
use lightning::sign::{InMemorySigner, Recipient, KeyMaterial, EntropySource, NodeSigner, SignerProvider};
use lightning::events::Event;
use lightning::ln::{ChannelId, PaymentHash, PaymentPreimage, PaymentSecret};
-use lightning::ln::channelmanager::{ChainParameters, ChannelDetails, ChannelManager, PaymentId, RecipientOnionFields, Retry};
+use lightning::ln::channelmanager::{ChainParameters, ChannelDetails, ChannelManager, PaymentId, RecipientOnionFields, Retry, InterceptId};
use lightning::ln::peer_handler::{MessageHandler,PeerManager,SocketDescriptor,IgnoringMessageHandler};
use lightning::ln::msgs::{self, DecodeError};
use lightning::ln::script::ShutdownScript;
use lightning::routing::gossip::{P2PGossipSync, NetworkGraph};
use lightning::routing::utxo::UtxoLookup;
use lightning::routing::router::{InFlightHtlcs, PaymentParameters, Route, RouteParameters, Router};
-use lightning::util::config::{UserConfig, MaxDustHTLCExposure};
+use lightning::util::config::{ChannelConfig, UserConfig};
+use lightning::util::hash_tables::*;
use lightning::util::errors::APIError;
use lightning::util::test_channel_signer::{TestChannelSigner, EnforcementState};
use lightning::util::logger::Logger;
-use lightning::util::ser::{ReadableArgs, Writeable};
+use lightning::util::ser::{Readable, ReadableArgs, Writeable};
use crate::utils::test_logger;
use crate::utils::test_persister::TestPersister;
use bitcoin::secp256k1::schnorr;
use std::cell::RefCell;
-use hashbrown::{HashMap, hash_map};
use std::convert::TryInto;
use std::cmp;
use std::sync::{Arc, Mutex};
((v[1] as u16) << 8*0)
}
+#[inline]
+pub fn be16_to_array(u: u16) -> [u8; 2] {
+ let mut v = [0; 2];
+ v[0] = ((u >> 8*1) & 0xff) as u8;
+ v[1] = ((u >> 8*0) & 0xff) as u8;
+ v
+}
+
#[inline]
pub fn slice_to_be24(v: &[u8]) -> u32 {
((v[0] as u32) << 8*2) |
((v[2] as u32) << 8*0)
}
-#[inline]
-pub fn slice_to_be32(v: &[u8]) -> u32 {
- ((v[0] as u32) << 8*3) |
- ((v[1] as u32) << 8*2) |
- ((v[2] as u32) << 8*1) |
- ((v[3] as u32) << 8*0)
-}
-
-#[inline]
-pub fn be64_to_array(u: u64) -> [u8; 8] {
- let mut v = [0; 8];
- v[0] = ((u >> 8*7) & 0xff) as u8;
- v[1] = ((u >> 8*6) & 0xff) as u8;
- v[2] = ((u >> 8*5) & 0xff) as u8;
- v[3] = ((u >> 8*4) & 0xff) as u8;
- v[4] = ((u >> 8*3) & 0xff) as u8;
- v[5] = ((u >> 8*2) & 0xff) as u8;
- v[6] = ((u >> 8*1) & 0xff) as u8;
- v[7] = ((u >> 8*0) & 0xff) as u8;
- v
-}
-
struct InputData {
data: Vec<u8>,
read_pos: AtomicUsize,
Some(&self.data[old_pos..old_pos + len])
}
}
+impl std::io::Read for &InputData {
+ fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
+ if let Some(sl) = self.get_slice(buf.len()) {
+ buf.copy_from_slice(sl);
+ Ok(buf.len())
+ } else {
+ Ok(0)
+ }
+ }
+}
struct FuzzEstimator {
input: Arc<InputData>,
})
}
- fn create_blinded_payment_paths<
- ES: EntropySource + ?Sized, T: secp256k1::Signing + secp256k1::Verification
- >(
+ fn create_blinded_payment_paths<T: secp256k1::Signing + secp256k1::Verification>(
&self, _recipient: PublicKey, _first_hops: Vec<ChannelDetails>, _tlvs: ReceiveTlvs,
- _amount_msats: u64, _entropy_source: &ES, _secp_ctx: &Secp256k1<T>
+ _amount_msats: u64, _secp_ctx: &Secp256k1<T>,
) -> Result<Vec<(BlindedPayInfo, BlindedPath)>, ()> {
unreachable!()
}
unreachable!()
}
- fn create_blinded_paths<
- ES: EntropySource + ?Sized, T: secp256k1::Signing + secp256k1::Verification
- >(
- &self, _recipient: PublicKey, _peers: Vec<PublicKey>, _entropy_source: &ES,
- _secp_ctx: &Secp256k1<T>
+ fn create_blinded_paths<T: secp256k1::Signing + secp256k1::Verification>(
+ &self, _recipient: PublicKey, _peers: Vec<PublicKey>, _secp_ctx: &Secp256k1<T>,
) -> Result<Vec<BlindedPath>, ()> {
unreachable!()
}
peers,
funding_txn: Vec::new(),
- txids_confirmed: HashMap::new(),
+ txids_confirmed: new_hash_map(),
header_hashes: vec![(genesis_block(Network::Bitcoin).block_hash(), 0)],
height: 0,
max_height: 0,
let mut txdata = Vec::with_capacity(all_txn.len());
for (idx, tx) in all_txn.iter().enumerate() {
let txid = tx.txid();
- match self.txids_confirmed.entry(txid) {
- hash_map::Entry::Vacant(e) => {
- e.insert(self.height);
- txdata.push((idx + 1, tx));
- },
- _ => {},
- }
+ self.txids_confirmed.entry(txid).or_insert_with(|| {
+ txdata.push((idx + 1, tx));
+ self.height
+ });
}
self.blocks_connected += 1;
}
#[inline]
-pub fn do_test(data: &[u8], logger: &Arc<dyn Logger>) {
+pub fn do_test(mut data: &[u8], logger: &Arc<dyn Logger>) {
+ if data.len() < 32 { return; }
+
+ let our_network_key = match SecretKey::from_slice(&data[..32]) {
+ Ok(key) => key,
+ Err(_) => return,
+ };
+ data = &data[32..];
+
+ let config: UserConfig = if let Ok(config) = Readable::read(&mut data) { config } else { return; };
+
let input = Arc::new(InputData {
data: data.to_vec(),
read_pos: AtomicUsize::new(0),
}
}
+ macro_rules! get_bytes {
+ ($len: expr) => { {
+ let mut res = [0; $len];
+ match input.get_slice($len as usize) {
+ Some(slice) => res.copy_from_slice(slice),
+ None => return,
+ }
+ res
+ } }
+ }
+
macro_rules! get_pubkey {
() => {
match PublicKey::from_slice(get_slice!(33)) {
}
}
- let our_network_key = match SecretKey::from_slice(get_slice!(32)) {
- Ok(key) => key,
- Err(_) => return,
- };
let inbound_payment_key = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 42];
node_secret: our_network_key.clone(),
inbound_payment_key: KeyMaterial(inbound_payment_key.try_into().unwrap()),
counter: AtomicU64::new(0),
- signer_state: RefCell::new(HashMap::new())
+ signer_state: RefCell::new(new_hash_map())
});
- let mut config = UserConfig::default();
- config.channel_config.forwarding_fee_proportional_millionths = slice_to_be32(get_slice!(4));
- config.channel_config.max_dust_htlc_exposure = MaxDustHTLCExposure::FeeRateMultiplier(5_000_000 / 253);
- config.channel_handshake_config.announced_channel = get_slice!(1)[0] != 0;
let network = Network::Bitcoin;
let best_block_timestamp = genesis_block(network).header.time;
let params = ChainParameters {
let mut should_forward = false;
let mut payments_received: Vec<PaymentHash> = Vec::new();
- let mut payments_sent = 0;
+ let mut intercepted_htlcs: Vec<InterceptId> = Vec::new();
+ let mut payments_sent: u16 = 0;
let mut pending_funding_generation: Vec<(ChannelId, PublicKey, u64, ScriptBuf)> = Vec::new();
- let mut pending_funding_signatures = HashMap::new();
+ let mut pending_funding_signatures = new_hash_map();
loop {
match get_slice!(1)[0] {
let params = RouteParameters::from_payment_params_and_value(
payment_params, final_value_msat);
let mut payment_hash = PaymentHash([0; 32]);
- payment_hash.0[0..8].copy_from_slice(&be64_to_array(payments_sent));
+ payment_hash.0[0..2].copy_from_slice(&be16_to_array(payments_sent));
payment_hash.0 = Sha256::hash(&payment_hash.0[..]).to_byte_array();
payments_sent += 1;
- match channelmanager.send_payment(payment_hash,
- RecipientOnionFields::spontaneous_empty(), PaymentId(payment_hash.0), params,
- Retry::Attempts(0))
- {
- Ok(_) => {},
- Err(_) => return,
- }
+ let _ = channelmanager.send_payment(
+ payment_hash, RecipientOnionFields::spontaneous_empty(),
+ PaymentId(payment_hash.0), params, Retry::Attempts(2)
+ );
},
15 => {
let final_value_msat = slice_to_be24(get_slice!(3)) as u64;
let params = RouteParameters::from_payment_params_and_value(
payment_params, final_value_msat);
let mut payment_hash = PaymentHash([0; 32]);
- payment_hash.0[0..8].copy_from_slice(&be64_to_array(payments_sent));
+ payment_hash.0[0..2].copy_from_slice(&be16_to_array(payments_sent));
payment_hash.0 = Sha256::hash(&payment_hash.0[..]).to_byte_array();
payments_sent += 1;
let mut payment_secret = PaymentSecret([0; 32]);
- payment_secret.0[0..8].copy_from_slice(&be64_to_array(payments_sent));
+ payment_secret.0[0..2].copy_from_slice(&be16_to_array(payments_sent));
payments_sent += 1;
- match channelmanager.send_payment(payment_hash,
- RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0),
- params, Retry::Attempts(0))
- {
- Ok(_) => {},
- Err(_) => return,
- }
+ let _ = channelmanager.send_payment(
+ payment_hash, RecipientOnionFields::secret_only(payment_secret),
+ PaymentId(payment_hash.0), params, Retry::Attempts(2)
+ );
+ },
+ 17 => {
+ let final_value_msat = slice_to_be24(get_slice!(3)) as u64;
+ let payment_params = PaymentParameters::from_node_id(get_pubkey!(), 42);
+ let params = RouteParameters::from_payment_params_and_value(
+ payment_params, final_value_msat);
+ let _ = channelmanager.send_preflight_probes(params, None);
+ },
+ 18 => {
+ let idx = u16::from_be_bytes(get_bytes!(2)) % cmp::max(payments_sent, 1);
+ let mut payment_id = PaymentId([0; 32]);
+ payment_id.0[0..2].copy_from_slice(&idx.to_be_bytes());
+ channelmanager.abandon_payment(payment_id);
},
5 => {
let peer_id = get_slice!(1)[0];
}
},
10 => {
- 'outer_loop: for funding_generation in pending_funding_generation.drain(..) {
- let mut tx = Transaction { version: 0, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
- value: funding_generation.2, script_pubkey: funding_generation.3,
- }] };
- let funding_output = 'search_loop: loop {
- let funding_txid = tx.txid();
- if let None = loss_detector.txids_confirmed.get(&funding_txid) {
- let outpoint = OutPoint { txid: funding_txid, index: 0 };
- for chan in channelmanager.list_channels() {
- if chan.channel_id == outpoint.to_channel_id() {
- tx.version += 1;
- continue 'search_loop;
- }
+ let mut tx = Transaction { version: 0, lock_time: LockTime::ZERO, input: Vec::new(), output: Vec::new() };
+ let mut channels = Vec::new();
+ for funding_generation in pending_funding_generation.drain(..) {
+ let txout = TxOut {
+ value: funding_generation.2, script_pubkey: funding_generation.3,
+ };
+ if !tx.output.contains(&txout) {
+ tx.output.push(txout);
+ channels.push((funding_generation.0, funding_generation.1));
+ }
+ }
+ // Once we switch to V2 channel opens we should be able to drop this entirely as
+ // channel_ids no longer change when we set the funding tx.
+ 'search_loop: loop {
+ if tx.version > 0xff {
+ break;
+ }
+ let funding_txid = tx.txid();
+ if loss_detector.txids_confirmed.get(&funding_txid).is_none() {
+ let outpoint = OutPoint { txid: funding_txid, index: 0 };
+ for chan in channelmanager.list_channels() {
+ if chan.channel_id == ChannelId::v1_from_funding_outpoint(outpoint) {
+ tx.version += 1;
+ continue 'search_loop;
}
- break outpoint;
}
- tx.version += 1;
- if tx.version > 0xff {
- continue 'outer_loop;
- }
- };
- if let Err(e) = channelmanager.funding_transaction_generated(&funding_generation.0, &funding_generation.1, tx.clone()) {
+ break;
+ }
+ tx.version += 1;
+ }
+ if tx.version <= 0xff && !channels.is_empty() {
+ let chans = channels.iter().map(|(a, b)| (a, b)).collect::<Vec<_>>();
+ if let Err(e) = channelmanager.batch_funding_transaction_generated(&chans, tx.clone()) {
// It's possible the channel has been closed in the mean time, but any other
// failure may be a bug.
if let APIError::ChannelUnavailable { .. } = e { } else { panic!(); }
}
- pending_funding_signatures.insert(funding_output, tx);
+ let funding_txid = tx.txid();
+ for idx in 0..tx.output.len() {
+ let outpoint = OutPoint { txid: funding_txid, index: idx as u16 };
+ pending_funding_signatures.insert(outpoint, tx.clone());
+ }
}
},
11 => {
}
},
12 => {
- let txlen = slice_to_be16(get_slice!(2));
+ let txlen = u16::from_be_bytes(get_bytes!(2));
if txlen == 0 {
loss_detector.connect_block(&[]);
} else {
channels.sort_by(|a, b| { a.channel_id.cmp(&b.channel_id) });
channelmanager.force_close_broadcasting_latest_txn(&channels[channel_id].channel_id, &channels[channel_id].counterparty.node_id).unwrap();
},
- // 15 is above
+			// 15, 16, 17, 18 are above
+ 19 => {
+ let mut list = loss_detector.handler.list_peers();
+ list.sort_by_key(|v| v.counterparty_node_id);
+ if let Some(peer_details) = list.get(0) {
+ loss_detector.handler.disconnect_by_node_id(peer_details.counterparty_node_id);
+ }
+ },
+ 20 => loss_detector.handler.disconnect_all_peers(),
+ 21 => loss_detector.handler.timer_tick_occurred(),
+ 22 =>
+ loss_detector.handler.broadcast_node_announcement([42; 3], [43; 32], Vec::new()),
+ 32 => channelmanager.timer_tick_occurred(),
+ 33 => {
+ for id in intercepted_htlcs.drain(..) {
+ channelmanager.fail_intercepted_htlc(id).unwrap();
+ }
+ }
+ 34 => {
+ let amt = u64::from_be_bytes(get_bytes!(8));
+ let chans = channelmanager.list_channels();
+ for id in intercepted_htlcs.drain(..) {
+ if chans.is_empty() {
+ channelmanager.fail_intercepted_htlc(id).unwrap();
+ } else {
+ let chan = &chans[amt as usize % chans.len()];
+ channelmanager.forward_intercepted_htlc(id, &chan.channel_id, chan.counterparty.node_id, amt).unwrap();
+ }
+ }
+ }
+ 35 => {
+ let config: ChannelConfig =
+ if let Ok(c) = Readable::read(&mut &*input) { c } else { return; };
+ let chans = channelmanager.list_channels();
+ if let Some(chan) = chans.get(0) {
+ let _ = channelmanager.update_channel_config(
+ &chan.counterparty.node_id, &[chan.channel_id], &config
+ );
+ }
+ }
_ => return,
}
loss_detector.handler.process_events();
Event::PendingHTLCsForwardable {..} => {
should_forward = true;
},
+ Event::HTLCIntercepted { intercept_id, .. } => {
+ if !intercepted_htlcs.contains(&intercept_id) {
+ intercepted_htlcs.push(intercept_id);
+ }
+ },
_ => {},
}
}
}
}
+ fn ext_from_hex(hex_with_spaces: &str, out: &mut Vec<u8>) {
+ for hex in hex_with_spaces.split(" ") {
+ out.append(&mut <Vec<u8>>::from_hex(hex).unwrap());
+ }
+ }
+
#[test]
fn test_no_existing_test_breakage() {
// To avoid accidentally causing all existing fuzz test cases to be useless by making minor
// so this should be updated pretty liberally, but at least we'll know when changes occur.
// If nothing else, this test serves as a pretty great initial full_stack_target seed.
- // What each byte represents is broken down below, and then everything is concatenated into
- // one large test at the end (you want %s/ -.*//g %s/\n\| \|\t\|\///g).
-
// Following BOLT 8, lightning message on the wire are: 2-byte encrypted message length +
// 16-byte MAC of the encrypted message length + encrypted Lightning message + 16-byte MAC
// of the Lightning message
// Writing new code generating transactions and see a new failure ? Don't forget to add input for the FuzzEstimator !
- // 0100000000000000000000000000000000000000000000000000000000000000 - our network key
- // 00000000 - fee_proportional_millionths
- // 01 - announce_channels_publicly
- //
- // 00 - new outbound connection with id 0
- // 030000000000000000000000000000000000000000000000000000000000000002 - peer's pubkey
- // 030032 - inbound read from peer id 0 of len 50
- // 00 030000000000000000000000000000000000000000000000000000000000000002 03000000000000000000000000000000 - noise act two (0||pubkey||mac)
- //
- // 030012 - inbound read from peer id 0 of len 18
- // 0010 03000000000000000000000000000000 - message header indicating message length 16
- // 030020 - inbound read from peer id 0 of len 32
- // 0010 00021aaa 0008aaaaaaaaaaaa9aaa 03000000000000000000000000000000 - init message (type 16) with static_remotekey required and other bits optional and mac
- //
- // 030012 - inbound read from peer id 0 of len 18
- // 0147 03000000000000000000000000000000 - message header indicating message length 327
- // 0300fe - inbound read from peer id 0 of len 254
- // 0020 6fe28c0ab6f1b372c1a6a246ae63f74f931e8365e15a089c68d6190000000000 ff4f00f805273c1b203bb5ebf8436bfde57b3be8c2f5e95d9491dbb181909679 000000000000c350 0000000000000000 0000000000000162 ffffffffffffffff 0000000000000222 0000000000000000 000000fd 0006 01e3 030000000000000000000000000000000000000000000000000000000000000001 030000000000000000000000000000000000000000000000000000000000000002 030000000000000000000000000000000000000000000000000000000000000003 030000000000000000000000000000000000000000000000000000000000000004 - beginning of open_channel message
- // 030059 - inbound read from peer id 0 of len 89
- // 030000000000000000000000000000000000000000000000000000000000000005 020900000000000000000000000000000000000000000000000000000000000000 01 0000 01021000 03000000000000000000000000000000 - rest of open_channel and mac
- //
- // 00fd - Two feerate requests (all returning min feerate, which our open_channel also uses) (gonna be ingested by FuzzEstimator)
- // - client should now respond with accept_channel (CHECK 1: type 33 to peer 03000000)
- //
- // 030012 - inbound read from peer id 0 of len 18
- // 0084 03000000000000000000000000000000 - message header indicating message length 132
- // 030094 - inbound read from peer id 0 of len 148
- // 0022 ff4f00f805273c1b203bb5ebf8436bfde57b3be8c2f5e95d9491dbb181909679 3d00000000000000000000000000000000000000000000000000000000000000 0000 00000000000000000000000000000000000000000000000000000000000000210100000000000000000000000000000000000000000000000000000000000000 03000000000000000000000000000000 - funding_created and mac
- // - client should now respond with funding_signed (CHECK 2: type 35 to peer 03000000)
- //
- // 0c005e - connect a block with one transaction of len 94
- // 020000000100000000000000000000000000000000000000000000000000000000000000000000000000ffffffff0150c3000000000000220020ae0000000000000000000000000000000000000000000000000000000000000000000000 - the funding transaction
- // 0c0000 - connect a block with no transactions
- // 0c0000 - connect a block with no transactions
- // 0c0000 - connect a block with no transactions
- // 0c0000 - connect a block with no transactions
- // 0c0000 - connect a block with no transactions
- // 0c0000 - connect a block with no transactions
- // 0c0000 - connect a block with no transactions
- // 0c0000 - connect a block with no transactions
- // 0c0000 - connect a block with no transactions
- // 0c0000 - connect a block with no transactions
- // 0c0000 - connect a block with no transactions
- // 0c0000 - connect a block with no transactions
- // - by now client should have sent a channel_ready (CHECK 3: SendChannelReady to 03000000 for chan 3d000000)
- //
- // 030012 - inbound read from peer id 0 of len 18
- // 0043 03000000000000000000000000000000 - message header indicating message length 67
- // 030053 - inbound read from peer id 0 of len 83
- // 0024 3d00000000000000000000000000000000000000000000000000000000000000 020800000000000000000000000000000000000000000000000000000000000000 03000000000000000000000000000000 - channel_ready and mac
- //
- // 01 - new inbound connection with id 1
- // 030132 - inbound read from peer id 1 of len 50
- // 0003000000000000000000000000000000000000000000000000000000000000000703000000000000000000000000000000 - inbound noise act 1
- // 030142 - inbound read from peer id 1 of len 66
- // 000302000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003000000000000000000000000000000 - inbound noise act 3
- //
- // 030112 - inbound read from peer id 1 of len 18
- // 0010 01000000000000000000000000000000 - message header indicating message length 16
- // 030120 - inbound read from peer id 1 of len 32
- // 0010 00021aaa 0008aaaaaaaaaaaa9aaa 01000000000000000000000000000000 - init message (type 16) with static_remotekey required and other bits optional and mac
- //
- // 05 01 030200000000000000000000000000000000000000000000000000000000000000 00c350 0003e8 - create outbound channel to peer 1 for 50k sat
- // 00fd - One feerate requests (all returning min feerate) (gonna be ingested by FuzzEstimator)
- //
- // 030112 - inbound read from peer id 1 of len 18
- // 0112 01000000000000000000000000000000 - message header indicating message length 274
- // 0301ff - inbound read from peer id 1 of len 255
- // 0021 0000000000000000000000000000000000000000000000000000000000000e05 0000000000000162 00000000004c4b40 00000000000003e8 00000000000003e8 00000002 03f0 0005 030000000000000000000000000000000000000000000000000000000000000100 030000000000000000000000000000000000000000000000000000000000000200 030000000000000000000000000000000000000000000000000000000000000300 030000000000000000000000000000000000000000000000000000000000000400 030000000000000000000000000000000000000000000000000000000000000500 02660000000000000000000000000000 - beginning of accept_channel
- // 030123 - inbound read from peer id 1 of len 35
- // 0000000000000000000000000000000000 0000 01000000000000000000000000000000 - rest of accept_channel and mac
- //
- // 0a - create the funding transaction (client should send funding_created now)
- //
- // 00fd00fd - Two feerate requests (calculating max dust exposure) (all returning min feerate) (gonna be ingested by FuzzEstimator)
- //
- // 030112 - inbound read from peer id 1 of len 18
- // 0062 01000000000000000000000000000000 - message header indicating message length 98
- // 030172 - inbound read from peer id 1 of len 114
- // 0023 3a00000000000000000000000000000000000000000000000000000000000000 000000000000000000000000000000000000000000000000000000000000007c0001000000000000000000000000000000000000000000000000000000000000 01000000000000000000000000000000 - funding_signed message and mac
- //
- // 0b - broadcast funding transaction
- // - by now client should have sent a channel_ready (CHECK 4: SendChannelReady to 03020000 for chan 3f000000)
- //
- // 030112 - inbound read from peer id 1 of len 18
- // 0043 01000000000000000000000000000000 - message header indicating message length 67
- // 030153 - inbound read from peer id 1 of len 83
- // 0024 3a00000000000000000000000000000000000000000000000000000000000000 026700000000000000000000000000000000000000000000000000000000000000 01000000000000000000000000000000 - channel_ready and mac
- //
- // 030012 - inbound read from peer id 0 of len 18
- // 05ac 03000000000000000000000000000000 - message header indicating message length 1452
- // 0300ff - inbound read from peer id 0 of len 255
- // 0080 3d00000000000000000000000000000000000000000000000000000000000000 0000000000000000 0000000000003e80 ff00000000000000000000000000000000000000000000000000000000000000 000003f0 00 030000000000000000000000000000000000000000000000000000000000000555 11 020203e8 0401a0 060800000e0000010000 0a00000000000000000000000000000000000000000000000000000000000000 ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff - beginning of update_add_htlc from 0 to 1 via client
- // 0300ff - inbound read from peer id 0 of len 255
- // ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
- // 0300ff - inbound read from peer id 0 of len 255
- // ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
- // 0300ff - inbound read from peer id 0 of len 255
- // ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
- // 0300ff - inbound read from peer id 0 of len 255
- // ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
- // 0300c1 - inbound read from peer id 0 of len 193
- // ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff ab00000000000000000000000000000000000000000000000000000000000000 03000000000000000000000000000000 - end of update_add_htlc from 0 to 1 via client and mac
- //
- // 00fd - One feerate request (calculating max dust exposure) (all returning min feerate) (gonna be ingested by FuzzEstimator)
- //
- // 030012 - inbound read from peer id 0 of len 18
- // 0064 03000000000000000000000000000000 - message header indicating message length 100
- // 030074 - inbound read from peer id 0 of len 116
- // 0084 3d00000000000000000000000000000000000000000000000000000000000000 00000000000000000000000000000000000000000000000000000000000000300100000000000000000000000000000000000000000000000000000000000000 0000 03000000000000000000000000000000 - commitment_signed and mac
- // - client should now respond with revoke_and_ack and commitment_signed (CHECK 5/6: types 133 and 132 to peer 03000000)
- //
- // 030012 - inbound read from peer id 0 of len 18
- // 0063 03000000000000000000000000000000 - message header indicating message length 99
- // 030073 - inbound read from peer id 0 of len 115
- // 0085 3d00000000000000000000000000000000000000000000000000000000000000 0900000000000000000000000000000000000000000000000000000000000000 020b00000000000000000000000000000000000000000000000000000000000000 03000000000000000000000000000000 - revoke_and_ack and mac
- //
- // 07 - process the now-pending HTLC forward
- // - client now sends id 1 update_add_htlc and commitment_signed (CHECK 7: UpdateHTLCs event for node 03020000 with 1 HTLCs for channel 3f000000)
- //
- // 00fd00fd - Two feerate requests (calculating max dust exposure) (all returning min feerate) (gonna be ingested by FuzzEstimator)
- //
- // - we respond with commitment_signed then revoke_and_ack (a weird, but valid, order)
- // 030112 - inbound read from peer id 1 of len 18
- // 0064 01000000000000000000000000000000 - message header indicating message length 100
- // 030174 - inbound read from peer id 1 of len 116
- // 0084 3a00000000000000000000000000000000000000000000000000000000000000 000000000000000000000000000000000000000000000000000000000000006a0001000000000000000000000000000000000000000000000000000000000000 0000 01000000000000000000000000000000 - commitment_signed and mac
- //
- // 030112 - inbound read from peer id 1 of len 18
- // 0063 01000000000000000000000000000000 - message header indicating message length 99
- // 030173 - inbound read from peer id 1 of len 115
- // 0085 3a00000000000000000000000000000000000000000000000000000000000000 6600000000000000000000000000000000000000000000000000000000000000 026400000000000000000000000000000000000000000000000000000000000000 01000000000000000000000000000000 - revoke_and_ack and mac
- //
- // 030112 - inbound read from peer id 1 of len 18
- // 004a 01000000000000000000000000000000 - message header indicating message length 74
- // 03015a - inbound read from peer id 1 of len 90
- // 0082 3a00000000000000000000000000000000000000000000000000000000000000 0000000000000000 ff00888888888888888888888888888888888888888888888888888888888888 01000000000000000000000000000000 - update_fulfill_htlc and mac
- // - client should immediately claim the pending HTLC from peer 0 (CHECK 8: SendFulfillHTLCs for node 03000000 with preimage ff00888888 for channel 3d000000)
- //
- // 030112 - inbound read from peer id 1 of len 18
- // 0064 01000000000000000000000000000000 - message header indicating message length 100
- // 030174 - inbound read from peer id 1 of len 116
- // 0084 3a00000000000000000000000000000000000000000000000000000000000000 00000000000000000000000000000000000000000000000000000000000000100001000000000000000000000000000000000000000000000000000000000000 0000 01000000000000000000000000000000 - commitment_signed and mac
- //
- // 030112 - inbound read from peer id 1 of len 18
- // 0063 01000000000000000000000000000000 - message header indicating message length 99
- // 030173 - inbound read from peer id 1 of len 115
- // 0085 3a00000000000000000000000000000000000000000000000000000000000000 6700000000000000000000000000000000000000000000000000000000000000 026500000000000000000000000000000000000000000000000000000000000000 01000000000000000000000000000000 - revoke_and_ack and mac
- //
- // - before responding to the commitment_signed generated above, send a new HTLC
- // 030012 - inbound read from peer id 0 of len 18
- // 05ac 03000000000000000000000000000000 - message header indicating message length 1452
- // 0300ff - inbound read from peer id 0 of len 255
- // 0080 3d00000000000000000000000000000000000000000000000000000000000000 0000000000000001 0000000000003e80 ff00000000000000000000000000000000000000000000000000000000000000 000003f0 00 030000000000000000000000000000000000000000000000000000000000000555 11 020203e8 0401a0 060800000e0000010000 0a00000000000000000000000000000000000000000000000000000000000000 ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff - beginning of update_add_htlc from 0 to 1 via client
- // 0300ff - inbound read from peer id 0 of len 255
- // ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
- // 0300ff - inbound read from peer id 0 of len 255
- // ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
- // 0300ff - inbound read from peer id 0 of len 255
- // ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
- // 0300ff - inbound read from peer id 0 of len 255
- // ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
- // 0300c1 - inbound read from peer id 0 of len 193
- // ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff ab00000000000000000000000000000000000000000000000000000000000000 03000000000000000000000000000000 - end of update_add_htlc from 0 to 1 via client and mac
- //
- // 00fd - One feerate request (calculating max dust exposure) (all returning min feerate) (gonna be ingested by FuzzEstimator)
- //
- // - now respond to the update_fulfill_htlc+commitment_signed messages the client sent to peer 0
- // 030012 - inbound read from peer id 0 of len 18
- // 0063 03000000000000000000000000000000 - message header indicating message length 99
- // 030073 - inbound read from peer id 0 of len 115
- // 0085 3d00000000000000000000000000000000000000000000000000000000000000 0800000000000000000000000000000000000000000000000000000000000000 020a00000000000000000000000000000000000000000000000000000000000000 03000000000000000000000000000000 - revoke_and_ack and mac
- // - client should now respond with revoke_and_ack and commitment_signed (CHECK 5/6 duplicates)
- //
- // 030012 - inbound read from peer id 0 of len 18
- // 0064 03000000000000000000000000000000 - message header indicating message length 100
- // 030074 - inbound read from peer id 0 of len 116
- // 0084 3d00000000000000000000000000000000000000000000000000000000000000 00000000000000000000000000000000000000000000000000000000000000c30100000000000000000000000000000000000000000000000000000000000000 0000 03000000000000000000000000000000 - commitment_signed and mac
- //
- // 030012 - inbound read from peer id 0 of len 18
- // 0063 03000000000000000000000000000000 - message header indicating message length 99
- // 030073 - inbound read from peer id 0 of len 115
- // 0085 3d00000000000000000000000000000000000000000000000000000000000000 0b00000000000000000000000000000000000000000000000000000000000000 020d00000000000000000000000000000000000000000000000000000000000000 03000000000000000000000000000000 - revoke_and_ack and mac
- //
- // 07 - process the now-pending HTLC forward
- // - client now sends id 1 update_add_htlc and commitment_signed (CHECK 7 duplicate)
- // - we respond with revoke_and_ack, then commitment_signed, then update_fail_htlc
- //
- // 00fd00fd - Two feerate requests (calculating max dust exposure) (all returning min feerate) (gonna be ingested by FuzzEstimator)
+ let mut test = Vec::new();
+ // our network key
+ ext_from_hex("0100000000000000000000000000000000000000000000000000000000000000", &mut test);
+ // config
+ ext_from_hex("0000000000900000000000000000640001000000000001ffff0000000000000000ffffffffffffffffffffffffffffffff0000000000000000ffffffffffffffff000000ffffffff00ffff1a000400010000020400000000040200000a08ffffffffffffffff0001000000", &mut test);
+
+ // new outbound connection with id 0
+ ext_from_hex("00", &mut test);
+ // peer's pubkey
+ ext_from_hex("030000000000000000000000000000000000000000000000000000000000000002", &mut test);
+ // inbound read from peer id 0 of len 50
+ ext_from_hex("030032", &mut test);
+ // noise act two (0||pubkey||mac)
+ ext_from_hex("00 030000000000000000000000000000000000000000000000000000000000000002 03000000000000000000000000000000", &mut test);
+
+ // inbound read from peer id 0 of len 18
+ ext_from_hex("030012", &mut test);
+ // message header indicating message length 16
+ ext_from_hex("0010 03000000000000000000000000000000", &mut test);
+ // inbound read from peer id 0 of len 32
+ ext_from_hex("030020", &mut test);
+ // init message (type 16) with static_remotekey required, no channel_type/anchors/taproot, and other bits optional and mac
+ ext_from_hex("0010 00021aaa 0008aaa20aaa2a0a9aaa 03000000000000000000000000000000", &mut test);
+
+ // inbound read from peer id 0 of len 18
+ ext_from_hex("030012", &mut test);
+ // message header indicating message length 327
+ ext_from_hex("0147 03000000000000000000000000000000", &mut test);
+ // inbound read from peer id 0 of len 254
+ ext_from_hex("0300fe", &mut test);
+ // beginning of open_channel message
+ ext_from_hex("0020 6fe28c0ab6f1b372c1a6a246ae63f74f931e8365e15a089c68d6190000000000 ff4f00f805273c1b203bb5ebf8436bfde57b3be8c2f5e95d9491dbb181909679 000000000000c350 0000000000000000 0000000000000162 ffffffffffffffff 0000000000000222 0000000000000000 000000fd 0006 01e3 030000000000000000000000000000000000000000000000000000000000000001 030000000000000000000000000000000000000000000000000000000000000002 030000000000000000000000000000000000000000000000000000000000000003 030000000000000000000000000000000000000000000000000000000000000004", &mut test);
+ // inbound read from peer id 0 of len 89
+ ext_from_hex("030059", &mut test);
+ // rest of open_channel and mac
+ ext_from_hex("030000000000000000000000000000000000000000000000000000000000000005 020900000000000000000000000000000000000000000000000000000000000000 01 0000 01021000 03000000000000000000000000000000", &mut test);
+
+ // One feerate request returning min feerate, which our open_channel also uses (ingested by FuzzEstimator)
+ ext_from_hex("00fd", &mut test);
+ // client should now respond with accept_channel (CHECK 1: type 33 to peer 03000000)
+
+ // inbound read from peer id 0 of len 18
+ ext_from_hex("030012", &mut test);
+ // message header indicating message length 132
+ ext_from_hex("0084 03000000000000000000000000000000", &mut test);
+ // inbound read from peer id 0 of len 148
+ ext_from_hex("030094", &mut test);
+ // funding_created and mac
+ ext_from_hex("0022 ff4f00f805273c1b203bb5ebf8436bfde57b3be8c2f5e95d9491dbb181909679 3d00000000000000000000000000000000000000000000000000000000000000 0000 00000000000000000000000000000000000000000000000000000000000000210100000000000000000000000000000000000000000000000000000000000000 03000000000000000000000000000000", &mut test);
+ // client should now respond with funding_signed (CHECK 2: type 35 to peer 03000000)
+
+ // connect a block with one transaction of len 94
+ ext_from_hex("0c005e", &mut test);
+ // the funding transaction
+ ext_from_hex("020000000100000000000000000000000000000000000000000000000000000000000000000000000000ffffffff0150c3000000000000220020ae0000000000000000000000000000000000000000000000000000000000000000000000", &mut test);
+ // connect a block with no transactions, one per line
+ ext_from_hex("0c0000", &mut test);
+ ext_from_hex("0c0000", &mut test);
+ ext_from_hex("0c0000", &mut test);
+ ext_from_hex("0c0000", &mut test);
+ ext_from_hex("0c0000", &mut test);
+ ext_from_hex("0c0000", &mut test);
+ ext_from_hex("0c0000", &mut test);
+ ext_from_hex("0c0000", &mut test);
+ ext_from_hex("0c0000", &mut test);
+ ext_from_hex("0c0000", &mut test);
+ ext_from_hex("0c0000", &mut test);
+ ext_from_hex("0c0000", &mut test);
+ // by now client should have sent a channel_ready (CHECK 3: SendChannelReady to 03000000 for chan 3d000000)
+
+ // inbound read from peer id 0 of len 18
+ ext_from_hex("030012", &mut test);
+ // message header indicating message length 67
+ ext_from_hex("0043 03000000000000000000000000000000", &mut test);
+ // inbound read from peer id 0 of len 83
+ ext_from_hex("030053", &mut test);
+ // channel_ready and mac
+ ext_from_hex("0024 3d00000000000000000000000000000000000000000000000000000000000000 020800000000000000000000000000000000000000000000000000000000000000 03000000000000000000000000000000", &mut test);
+
+ // new inbound connection with id 1
+ ext_from_hex("01", &mut test);
+ // inbound read from peer id 1 of len 50
+ ext_from_hex("030132", &mut test);
+ // inbound noise act 1
+ ext_from_hex("0003000000000000000000000000000000000000000000000000000000000000000703000000000000000000000000000000", &mut test);
+ // inbound read from peer id 1 of len 66
+ ext_from_hex("030142", &mut test);
+ // inbound noise act 3
+ ext_from_hex("000302000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003000000000000000000000000000000", &mut test);
+
+ // inbound read from peer id 1 of len 18
+ ext_from_hex("030112", &mut test);
+ // message header indicating message length 16
+ ext_from_hex("0010 01000000000000000000000000000000", &mut test);
+ // inbound read from peer id 1 of len 32
+ ext_from_hex("030120", &mut test);
+ // init message (type 16) with static_remotekey required, no channel_type/anchors/taproot, and other bits optional and mac
+ ext_from_hex("0010 00021aaa 0008aaa20aaa2a0a9aaa 01000000000000000000000000000000", &mut test);
+
+ // create outbound channel to peer 1 for 50k sat
+ ext_from_hex("05 01 030200000000000000000000000000000000000000000000000000000000000000 00c350 0003e8", &mut test);
+ // One feerate request (returning min feerate, ingested by FuzzEstimator)
+ ext_from_hex("00fd", &mut test);
+
+ // inbound read from peer id 1 of len 18
+ ext_from_hex("030112", &mut test);
+ // message header indicating message length 274
+ ext_from_hex("0112 01000000000000000000000000000000", &mut test);
+ // inbound read from peer id 1 of len 255
+ ext_from_hex("0301ff", &mut test);
+ // beginning of accept_channel
+ ext_from_hex("0021 0000000000000000000000000000000000000000000000000000000000000e05 0000000000000162 00000000004c4b40 00000000000003e8 00000000000003e8 00000002 03f0 0005 030000000000000000000000000000000000000000000000000000000000000100 030000000000000000000000000000000000000000000000000000000000000200 030000000000000000000000000000000000000000000000000000000000000300 030000000000000000000000000000000000000000000000000000000000000400 030000000000000000000000000000000000000000000000000000000000000500 02660000000000000000000000000000", &mut test);
+ // inbound read from peer id 1 of len 35
+ ext_from_hex("030123", &mut test);
+ // rest of accept_channel and mac
+ ext_from_hex("0000000000000000000000000000000000 0000 01000000000000000000000000000000", &mut test);
+
+ // create the funding transaction (client should send funding_created now)
+ ext_from_hex("0a", &mut test);
+
+ // inbound read from peer id 1 of len 18
+ ext_from_hex("030112", &mut test);
+ // message header indicating message length 98
+ ext_from_hex("0062 01000000000000000000000000000000", &mut test);
+ // inbound read from peer id 1 of len 114
+ ext_from_hex("030172", &mut test);
+ // funding_signed message and mac
+ ext_from_hex("0023 3a00000000000000000000000000000000000000000000000000000000000000 000000000000000000000000000000000000000000000000000000000000007c0001000000000000000000000000000000000000000000000000000000000000 01000000000000000000000000000000", &mut test);
+
+ // broadcast funding transaction
+ ext_from_hex("0b", &mut test);
+ // by now client should have sent a channel_ready (CHECK 4: SendChannelReady to 03020000 for chan 3f000000)
+
+ // inbound read from peer id 1 of len 18
+ ext_from_hex("030112", &mut test);
+ // message header indicating message length 67
+ ext_from_hex("0043 01000000000000000000000000000000", &mut test);
+ // inbound read from peer id 1 of len 83
+ ext_from_hex("030153", &mut test);
+ // channel_ready and mac
+ ext_from_hex("0024 3a00000000000000000000000000000000000000000000000000000000000000 026700000000000000000000000000000000000000000000000000000000000000 01000000000000000000000000000000", &mut test);
+
+ // inbound read from peer id 0 of len 18
+ ext_from_hex("030012", &mut test);
+ // message header indicating message length 1452
+ ext_from_hex("05ac 03000000000000000000000000000000", &mut test);
+ // inbound read from peer id 0 of len 255
+ ext_from_hex("0300ff", &mut test);
+ // beginning of update_add_htlc from 0 to 1 via client
+ ext_from_hex("0080 3d00000000000000000000000000000000000000000000000000000000000000 0000000000000000 0000000000003e80 ff00000000000000000000000000000000000000000000000000000000000000 000003f0 00 030000000000000000000000000000000000000000000000000000000000000555 11 020203e8 0401a0 060800000e0000010000 0a00000000000000000000000000000000000000000000000000000000000000 ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", &mut test);
+ // inbound read from peer id 0 of len 255
+ ext_from_hex("0300ff", &mut test);
+ ext_from_hex("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", &mut test);
+ // inbound read from peer id 0 of len 255
+ ext_from_hex("0300ff", &mut test);
+ ext_from_hex("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", &mut test);
+ // inbound read from peer id 0 of len 255
+ ext_from_hex("0300ff", &mut test);
+ ext_from_hex("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", &mut test);
+ // inbound read from peer id 0 of len 255
+ ext_from_hex("0300ff", &mut test);
+ ext_from_hex("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", &mut test);
+ // inbound read from peer id 0 of len 193
+ ext_from_hex("0300c1", &mut test);
+ // end of update_add_htlc from 0 to 1 via client and mac
+ ext_from_hex("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff ab00000000000000000000000000000000000000000000000000000000000000 03000000000000000000000000000000", &mut test);
+
+ // inbound read from peer id 0 of len 18
+ ext_from_hex("030012", &mut test);
+ // message header indicating message length 100
+ ext_from_hex("0064 03000000000000000000000000000000", &mut test);
+ // inbound read from peer id 0 of len 116
+ ext_from_hex("030074", &mut test);
+ // commitment_signed and mac
+ ext_from_hex("0084 3d00000000000000000000000000000000000000000000000000000000000000 00000000000000000000000000000000000000000000000000000000000000300100000000000000000000000000000000000000000000000000000000000000 0000 03000000000000000000000000000000", &mut test);
+ // client should now respond with revoke_and_ack and commitment_signed (CHECK 5/6: types 133 and 132 to peer 03000000)
+
+ // inbound read from peer id 0 of len 18
+ ext_from_hex("030012", &mut test);
+ // message header indicating message length 99
+ ext_from_hex("0063 03000000000000000000000000000000", &mut test);
+ // inbound read from peer id 0 of len 115
+ ext_from_hex("030073", &mut test);
+ // revoke_and_ack and mac
+ ext_from_hex("0085 3d00000000000000000000000000000000000000000000000000000000000000 0900000000000000000000000000000000000000000000000000000000000000 020b00000000000000000000000000000000000000000000000000000000000000 03000000000000000000000000000000", &mut test);
+
+ // process the now-pending HTLC forward
+ ext_from_hex("07", &mut test);
+ // client now sends id 1 update_add_htlc and commitment_signed (CHECK 7: UpdateHTLCs event for node 03020000 with 1 HTLCs for channel 3f000000)
+
+ // we respond with commitment_signed then revoke_and_ack (a weird, but valid, order)
+ // inbound read from peer id 1 of len 18
+ ext_from_hex("030112", &mut test);
+ // message header indicating message length 100
+ ext_from_hex("0064 01000000000000000000000000000000", &mut test);
+ // inbound read from peer id 1 of len 116
+ ext_from_hex("030174", &mut test);
+ // commitment_signed and mac
+ ext_from_hex("0084 3a00000000000000000000000000000000000000000000000000000000000000 000000000000000000000000000000000000000000000000000000000000006a0001000000000000000000000000000000000000000000000000000000000000 0000 01000000000000000000000000000000", &mut test);
//
- // 030112 - inbound read from peer id 1 of len 18
- // 0064 01000000000000000000000000000000 - message header indicating message length 100
- // 030174 - inbound read from peer id 1 of len 116
- // 0084 3a00000000000000000000000000000000000000000000000000000000000000 00000000000000000000000000000000000000000000000000000000000000390001000000000000000000000000000000000000000000000000000000000000 0000 01000000000000000000000000000000 - commitment_signed and mac
+ // inbound read from peer id 1 of len 18
+ ext_from_hex("030112", &mut test);
+ // message header indicating message length 99
+ ext_from_hex("0063 01000000000000000000000000000000", &mut test);
+ // inbound read from peer id 1 of len 115
+ ext_from_hex("030173", &mut test);
+ // revoke_and_ack and mac
+ ext_from_hex("0085 3a00000000000000000000000000000000000000000000000000000000000000 6600000000000000000000000000000000000000000000000000000000000000 026400000000000000000000000000000000000000000000000000000000000000 01000000000000000000000000000000", &mut test);
//
- // 030112 - inbound read from peer id 1 of len 18
- // 0063 01000000000000000000000000000000 - message header indicating message length 99
- // 030173 - inbound read from peer id 1 of len 115
- // 0085 3a00000000000000000000000000000000000000000000000000000000000000 6400000000000000000000000000000000000000000000000000000000000000 027000000000000000000000000000000000000000000000000000000000000000 01000000000000000000000000000000 - revoke_and_ack and mac
+ // inbound read from peer id 1 of len 18
+ ext_from_hex("030112", &mut test);
+ // message header indicating message length 74
+ ext_from_hex("004a 01000000000000000000000000000000", &mut test);
+ // inbound read from peer id 1 of len 90
+ ext_from_hex("03015a", &mut test);
+ // update_fulfill_htlc and mac
+ ext_from_hex("0082 3a00000000000000000000000000000000000000000000000000000000000000 0000000000000000 ff00888888888888888888888888888888888888888888888888888888888888 01000000000000000000000000000000", &mut test);
+ // client should immediately claim the pending HTLC from peer 0 (CHECK 8: SendFulfillHTLCs for node 03000000 with preimage ff00888888 for channel 3d000000)
+
+ // inbound read from peer id 1 of len 18
+ ext_from_hex("030112", &mut test);
+ // message header indicating message length 100
+ ext_from_hex("0064 01000000000000000000000000000000", &mut test);
+ // inbound read from peer id 1 of len 116
+ ext_from_hex("030174", &mut test);
+ // commitment_signed and mac
+ ext_from_hex("0084 3a00000000000000000000000000000000000000000000000000000000000000 00000000000000000000000000000000000000000000000000000000000000100001000000000000000000000000000000000000000000000000000000000000 0000 01000000000000000000000000000000", &mut test);
+
+ // inbound read from peer id 1 of len 18
+ ext_from_hex("030112", &mut test);
+ // message header indicating message length 99
+ ext_from_hex("0063 01000000000000000000000000000000", &mut test);
+ // inbound read from peer id 1 of len 115
+ ext_from_hex("030173", &mut test);
+ // revoke_and_ack and mac
+ ext_from_hex("0085 3a00000000000000000000000000000000000000000000000000000000000000 6700000000000000000000000000000000000000000000000000000000000000 026500000000000000000000000000000000000000000000000000000000000000 01000000000000000000000000000000", &mut test);
+
+ // before responding to the commitment_signed generated above, send a new HTLC
+ // inbound read from peer id 0 of len 18
+ ext_from_hex("030012", &mut test);
+ // message header indicating message length 1452
+ ext_from_hex("05ac 03000000000000000000000000000000", &mut test);
+ // inbound read from peer id 0 of len 255
+ ext_from_hex("0300ff", &mut test);
+ // beginning of update_add_htlc from 0 to 1 via client
+ ext_from_hex("0080 3d00000000000000000000000000000000000000000000000000000000000000 0000000000000001 0000000000003e80 ff00000000000000000000000000000000000000000000000000000000000000 000003f0 00 030000000000000000000000000000000000000000000000000000000000000555 11 020203e8 0401a0 060800000e0000010000 0a00000000000000000000000000000000000000000000000000000000000000 ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", &mut test);
+ // inbound read from peer id 0 of len 255
+ ext_from_hex("0300ff", &mut test);
+ ext_from_hex("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", &mut test);
+ // inbound read from peer id 0 of len 255
+ ext_from_hex("0300ff", &mut test);
+ ext_from_hex("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", &mut test);
+ // inbound read from peer id 0 of len 255
+ ext_from_hex("0300ff", &mut test);
+ ext_from_hex("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", &mut test);
+ // inbound read from peer id 0 of len 255
+ ext_from_hex("0300ff", &mut test);
+ ext_from_hex("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", &mut test);
+ // inbound read from peer id 0 of len 193
+ ext_from_hex("0300c1", &mut test);
+ // end of update_add_htlc from 0 to 1 via client and mac
+ ext_from_hex("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff ab00000000000000000000000000000000000000000000000000000000000000 03000000000000000000000000000000", &mut test);
+
+ // now respond to the update_fulfill_htlc+commitment_signed messages the client sent to peer 0
+ // inbound read from peer id 0 of len 18
+ ext_from_hex("030012", &mut test);
+ // message header indicating message length 99
+ ext_from_hex("0063 03000000000000000000000000000000", &mut test);
+ // inbound read from peer id 0 of len 115
+ ext_from_hex("030073", &mut test);
+ // revoke_and_ack and mac
+ ext_from_hex("0085 3d00000000000000000000000000000000000000000000000000000000000000 0800000000000000000000000000000000000000000000000000000000000000 020a00000000000000000000000000000000000000000000000000000000000000 03000000000000000000000000000000", &mut test);
+ // client should now respond with revoke_and_ack and commitment_signed (CHECK 5/6 duplicates)
+
+ // inbound read from peer id 0 of len 18
+ ext_from_hex("030012", &mut test);
+ // message header indicating message length 100
+ ext_from_hex("0064 03000000000000000000000000000000", &mut test);
+ // inbound read from peer id 0 of len 116
+ ext_from_hex("030074", &mut test);
+ // commitment_signed and mac
+ ext_from_hex("0084 3d00000000000000000000000000000000000000000000000000000000000000 00000000000000000000000000000000000000000000000000000000000000c30100000000000000000000000000000000000000000000000000000000000000 0000 03000000000000000000000000000000", &mut test);
+
+ // inbound read from peer id 0 of len 18
+ ext_from_hex("030012", &mut test);
+ // message header indicating message length 99
+ ext_from_hex("0063 03000000000000000000000000000000", &mut test);
+ // inbound read from peer id 0 of len 115
+ ext_from_hex("030073", &mut test);
+ // revoke_and_ack and mac
+ ext_from_hex("0085 3d00000000000000000000000000000000000000000000000000000000000000 0b00000000000000000000000000000000000000000000000000000000000000 020d00000000000000000000000000000000000000000000000000000000000000 03000000000000000000000000000000", &mut test);
+
+ // process the now-pending HTLC forward
+ ext_from_hex("07", &mut test);
+ // client now sends id 1 update_add_htlc and commitment_signed (CHECK 7 duplicate)
+ // we respond with revoke_and_ack, then commitment_signed, then update_fail_htlc
+
+ // inbound read from peer id 1 of len 18
+ ext_from_hex("030112", &mut test);
+ // message header indicating message length 100
+ ext_from_hex("0064 01000000000000000000000000000000", &mut test);
+ // inbound read from peer id 1 of len 116
+ ext_from_hex("030174", &mut test);
+ // commitment_signed and mac
+ ext_from_hex("0084 3a00000000000000000000000000000000000000000000000000000000000000 00000000000000000000000000000000000000000000000000000000000000390001000000000000000000000000000000000000000000000000000000000000 0000 01000000000000000000000000000000", &mut test);
+
+ // inbound read from peer id 1 of len 18
+ ext_from_hex("030112", &mut test);
+ // message header indicating message length 99
+ ext_from_hex("0063 01000000000000000000000000000000", &mut test);
+ // inbound read from peer id 1 of len 115
+ ext_from_hex("030173", &mut test);
+ // revoke_and_ack and mac
+ ext_from_hex("0085 3a00000000000000000000000000000000000000000000000000000000000000 6400000000000000000000000000000000000000000000000000000000000000 027000000000000000000000000000000000000000000000000000000000000000 01000000000000000000000000000000", &mut test);
+
+ // inbound read from peer id 1 of len 18
+ ext_from_hex("030112", &mut test);
+ // message header indicating message length 44
+ ext_from_hex("002c 01000000000000000000000000000000", &mut test);
+ // inbound read from peer id 1 of len 60
+ ext_from_hex("03013c", &mut test);
+ // update_fail_htlc and mac
+ ext_from_hex("0083 3a00000000000000000000000000000000000000000000000000000000000000 0000000000000001 0000 01000000000000000000000000000000", &mut test);
+
+ // inbound read from peer id 1 of len 18
+ ext_from_hex("030112", &mut test);
+ // message header indicating message length 100
+ ext_from_hex("0064 01000000000000000000000000000000", &mut test);
+ // inbound read from peer id 1 of len 116
+ ext_from_hex("030174", &mut test);
+ // commitment_signed and mac
+ ext_from_hex("0084 3a00000000000000000000000000000000000000000000000000000000000000 00000000000000000000000000000000000000000000000000000000000000390001000000000000000000000000000000000000000000000000000000000000 0000 01000000000000000000000000000000", &mut test);
+
+ // inbound read from peer id 1 of len 18
+ ext_from_hex("030112", &mut test);
+ // message header indicating message length 99
+ ext_from_hex("0063 01000000000000000000000000000000", &mut test);
+ // inbound read from peer id 1 of len 115
+ ext_from_hex("030173", &mut test);
+ // revoke_and_ack and mac
+ ext_from_hex("0085 3a00000000000000000000000000000000000000000000000000000000000000 6500000000000000000000000000000000000000000000000000000000000000 027100000000000000000000000000000000000000000000000000000000000000 01000000000000000000000000000000", &mut test);
+
+ // process the now-pending HTLC forward
+ ext_from_hex("07", &mut test);
+ // client now sends id 0 update_fail_htlc and commitment_signed (CHECK 9)
+ // now respond to the update_fail_htlc+commitment_signed messages the client sent to peer 0
+
+ // inbound read from peer id 0 of len 18
+ ext_from_hex("030012", &mut test);
+ // message header indicating message length 99
+ ext_from_hex("0063 03000000000000000000000000000000", &mut test);
+ // inbound read from peer id 0 of len 115
+ ext_from_hex("030073", &mut test);
+ // revoke_and_ack and mac
+ ext_from_hex("0085 3d00000000000000000000000000000000000000000000000000000000000000 0a00000000000000000000000000000000000000000000000000000000000000 020c00000000000000000000000000000000000000000000000000000000000000 03000000000000000000000000000000", &mut test);
+
+ // inbound read from peer id 0 of len 18
+ ext_from_hex("030012", &mut test);
+ // message header indicating message length 100
+ ext_from_hex("0064 03000000000000000000000000000000", &mut test);
+ // inbound read from peer id 0 of len 116
+ ext_from_hex("030074", &mut test);
+ // commitment_signed and mac
+ ext_from_hex("0084 3d00000000000000000000000000000000000000000000000000000000000000 00000000000000000000000000000000000000000000000000000000000000320100000000000000000000000000000000000000000000000000000000000000 0000 03000000000000000000000000000000", &mut test);
+ // client should now respond with revoke_and_ack (CHECK 5 duplicate)
+
+ // inbound read from peer id 0 of len 18
+ ext_from_hex("030012", &mut test);
+ // message header indicating message length 1452
+ ext_from_hex("05ac 03000000000000000000000000000000", &mut test);
+ // inbound read from peer id 0 of len 255
+ ext_from_hex("0300ff", &mut test);
+ // beginning of update_add_htlc from 0 to 1 via client
+ ext_from_hex("0080 3d00000000000000000000000000000000000000000000000000000000000000 0000000000000002 00000000000b0838 ff00000000000000000000000000000000000000000000000000000000000000 000003f0 00 030000000000000000000000000000000000000000000000000000000000000555 12 02030927c0 0401a0 060800000e0000010000 0a00000000000000000000000000000000000000000000000000000000000000 ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", &mut test);
+ // inbound read from peer id 0 of len 255
+ ext_from_hex("0300ff", &mut test);
+ ext_from_hex("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", &mut test);
+ // inbound read from peer id 0 of len 255
+ ext_from_hex("0300ff", &mut test);
+ ext_from_hex("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", &mut test);
+ // inbound read from peer id 0 of len 255
+ ext_from_hex("0300ff", &mut test);
+ ext_from_hex("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", &mut test);
+ // inbound read from peer id 0 of len 255
+ ext_from_hex("0300ff", &mut test);
+ ext_from_hex("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", &mut test);
+ // inbound read from peer id 0 of len 193
+ ext_from_hex("0300c1", &mut test);
+ // end of update_add_htlc from 0 to 1 via client and mac
+ ext_from_hex("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff 5300000000000000000000000000000000000000000000000000000000000000 03000000000000000000000000000000", &mut test);
+
+ // inbound read from peer id 0 of len 18
+ ext_from_hex("030012", &mut test);
+ // message header indicating message length 164
+ ext_from_hex("00a4 03000000000000000000000000000000", &mut test);
+ // inbound read from peer id 0 of len 180
+ ext_from_hex("0300b4", &mut test);
+ // commitment_signed and mac
+ ext_from_hex("0084 3d00000000000000000000000000000000000000000000000000000000000000 00000000000000000000000000000000000000000000000000000000000000750100000000000000000000000000000000000000000000000000000000000000 0001 00000000000000000000000000000000000000000000000000000000000000670500000000000000000000000000000000000000000000000000000000000006 03000000000000000000000000000000", &mut test);
+ // client should now respond with revoke_and_ack and commitment_signed (CHECK 5/6 duplicates)
+
+ // inbound read from peer id 0 of len 18
+ ext_from_hex("030012", &mut test);
+ // message header indicating message length 99
+ ext_from_hex("0063 03000000000000000000000000000000", &mut test);
+ // inbound read from peer id 0 of len 115
+ ext_from_hex("030073", &mut test);
+ // revoke_and_ack and mac
+ ext_from_hex("0085 3d00000000000000000000000000000000000000000000000000000000000000 0d00000000000000000000000000000000000000000000000000000000000000 020f00000000000000000000000000000000000000000000000000000000000000 03000000000000000000000000000000", &mut test);
+
+ // process the now-pending HTLC forward
+ ext_from_hex("07", &mut test);
+ // client now sends id 1 update_add_htlc and commitment_signed (CHECK 7 duplicate)
+
+ // connect a block with one transaction of len 125
+ ext_from_hex("0c007d", &mut test);
+ // the commitment transaction for channel 3f00000000000000000000000000000000000000000000000000000000000000
+ ext_from_hex("02000000013a000000000000000000000000000000000000000000000000000000000000000000000000000000800258020000000000002200204b0000000000000000000000000000000000000000000000000000000000000014c0000000000000160014280000000000000000000000000000000000000005000020", &mut test);
//
- // 030112 - inbound read from peer id 1 of len 18
- // 002c 01000000000000000000000000000000 - message header indicating message length 44
- // 03013c - inbound read from peer id 1 of len 60
- // 0083 3a00000000000000000000000000000000000000000000000000000000000000 0000000000000001 0000 01000000000000000000000000000000 - update_fail_htlc and mac
- //
- // 030112 - inbound read from peer id 1 of len 18
- // 0064 01000000000000000000000000000000 - message header indicating message length 100
- // 030174 - inbound read from peer id 1 of len 116
- // 0084 3a00000000000000000000000000000000000000000000000000000000000000 00000000000000000000000000000000000000000000000000000000000000390001000000000000000000000000000000000000000000000000000000000000 0000 01000000000000000000000000000000 - commitment_signed and mac
- //
- // 030112 - inbound read from peer id 1 of len 18
- // 0063 01000000000000000000000000000000 - message header indicating message length 99
- // 030173 - inbound read from peer id 1 of len 115
- // 0085 3a00000000000000000000000000000000000000000000000000000000000000 6500000000000000000000000000000000000000000000000000000000000000 027100000000000000000000000000000000000000000000000000000000000000 01000000000000000000000000000000 - revoke_and_ack and mac
- //
- // 07 - process the now-pending HTLC forward
- // - client now sends id 0 update_fail_htlc and commitment_signed (CHECK 9)
- // - now respond to the update_fail_htlc+commitment_signed messages the client sent to peer 0
- //
- // 030012 - inbound read from peer id 0 of len 18
- // 0063 03000000000000000000000000000000 - message header indicating message length 99
- // 030073 - inbound read from peer id 0 of len 115
- // 0085 3d00000000000000000000000000000000000000000000000000000000000000 0a00000000000000000000000000000000000000000000000000000000000000 020c00000000000000000000000000000000000000000000000000000000000000 03000000000000000000000000000000 - revoke_and_ack and mac
- //
- // 030012 - inbound read from peer id 0 of len 18
- // 0064 03000000000000000000000000000000 - message header indicating message length 100
- // 030074 - inbound read from peer id 0 of len 116
- // 0084 3d00000000000000000000000000000000000000000000000000000000000000 00000000000000000000000000000000000000000000000000000000000000320100000000000000000000000000000000000000000000000000000000000000 0000 03000000000000000000000000000000 - commitment_signed and mac
- // - client should now respond with revoke_and_ack (CHECK 5 duplicate)
- //
- // 030012 - inbound read from peer id 0 of len 18
- // 05ac 03000000000000000000000000000000 - message header indicating message length 1452
- // 0300ff - inbound read from peer id 0 of len 255
- // 0080 3d00000000000000000000000000000000000000000000000000000000000000 0000000000000002 00000000000b0838 ff00000000000000000000000000000000000000000000000000000000000000 000003f0 00 030000000000000000000000000000000000000000000000000000000000000555 12 02030927c0 0401a0 060800000e0000010000 0a00000000000000000000000000000000000000000000000000000000000000 ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff - beginning of update_add_htlc from 0 to 1 via client
- // 0300ff - inbound read from peer id 0 of len 255
- // ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
- // 0300ff - inbound read from peer id 0 of len 255
- // ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
- // 0300ff - inbound read from peer id 0 of len 255
- // ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
- // 0300ff - inbound read from peer id 0 of len 255
- // ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
- // 0300c1 - inbound read from peer id 0 of len 193
- // ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff 5300000000000000000000000000000000000000000000000000000000000000 03000000000000000000000000000000 - end of update_add_htlc from 0 to 1 via client and mac
- //
- // 00fd - One feerate request (calculating max dust exposure) (all returning min feerate) (gonna be ingested by FuzzEstimator)
- //
- // 030012 - inbound read from peer id 0 of len 18
- // 00a4 03000000000000000000000000000000 - message header indicating message length 164
- // 0300b4 - inbound read from peer id 0 of len 180
- // 0084 3d00000000000000000000000000000000000000000000000000000000000000 00000000000000000000000000000000000000000000000000000000000000750100000000000000000000000000000000000000000000000000000000000000 0001 00000000000000000000000000000000000000000000000000000000000000670500000000000000000000000000000000000000000000000000000000000006 03000000000000000000000000000000 - commitment_signed and mac
- // - client should now respond with revoke_and_ack and commitment_signed (CHECK 5/6 duplicates)
- //
- // 030012 - inbound read from peer id 0 of len 18
- // 0063 03000000000000000000000000000000 - message header indicating message length 99
- // 030073 - inbound read from peer id 0 of len 115
- // 0085 3d00000000000000000000000000000000000000000000000000000000000000 0d00000000000000000000000000000000000000000000000000000000000000 020f00000000000000000000000000000000000000000000000000000000000000 03000000000000000000000000000000 - revoke_and_ack and mac
- //
- // 07 - process the now-pending HTLC forward
- // - client now sends id 1 update_add_htlc and commitment_signed (CHECK 7 duplicate)
- //
- // 00fd00fd - Two feerate requests (calculating max dust exposure) (all returning min feerate) (gonna be ingested by FuzzEstimator)
- //
- // 0c007d - connect a block with one transaction of len 125
- // 02000000013a000000000000000000000000000000000000000000000000000000000000000000000000000000800258020000000000002200204b0000000000000000000000000000000000000000000000000000000000000014c0000000000000160014280000000000000000000000000000000000000005000020 - the commitment transaction for channel 3f00000000000000000000000000000000000000000000000000000000000000
- //
- // 0c005e - connect a block with one transaction of len 94
- // 0200000001730000000000000000000000000000000000000000000000000000000000000000000000000000000001a701000000000000220020b20000000000000000000000000000000000000000000000000000000000000000000000 - the HTLC timeout transaction
- // 0c0000 - connect a block with no transactions
- // 0c0000 - connect a block with no transactions
- // 0c0000 - connect a block with no transactions
- // 0c0000 - connect a block with no transactions
- // 0c0000 - connect a block with no transactions
- //
- // 07 - process the now-pending HTLC forward
- // - client now fails the HTLC backwards as it was unable to extract the payment preimage (CHECK 9 duplicate and CHECK 10)
+ // connect a block with one transaction of len 94
+ ext_from_hex("0c005e", &mut test);
+ // the HTLC timeout transaction
+ ext_from_hex("0200000001730000000000000000000000000000000000000000000000000000000000000000000000000000000001a701000000000000220020b20000000000000000000000000000000000000000000000000000000000000000000000", &mut test);
+ // connect a block with no transactions
+ ext_from_hex("0c0000", &mut test);
+ // connect a block with no transactions
+ ext_from_hex("0c0000", &mut test);
+ // connect a block with no transactions
+ ext_from_hex("0c0000", &mut test);
+ // connect a block with no transactions
+ ext_from_hex("0c0000", &mut test);
+ // connect a block with no transactions
+ ext_from_hex("0c0000", &mut test);
+
+ // process the now-pending HTLC forward
+ ext_from_hex("07", &mut test);
+ // client now fails the HTLC backwards as it was unable to extract the payment preimage (CHECK 9 duplicate and CHECK 10)
let logger = Arc::new(TrackingLogger { lines: Mutex::new(HashMap::new()) });
- super::do_test(&<Vec<u8>>::from_hex("01000000000000000000000000000000000000000000000000000000000000000000000001000300000000000000000000000000000000000000000000000000000000000000020300320003000000000000000000000000000000000000000000000000000000000000000203000000000000000000000000000000030012001003000000000000000000000000000000030020001000021aaa0008aaaaaaaaaaaa9aaa030000000000000000000000000000000300120147030000000000000000000000000000000300fe00206fe28c0ab6f1b372c1a6a246ae63f74f931e8365e15a089c68d6190000000000ff4f00f805273c1b203bb5ebf8436bfde57b3be8c2f5e95d9491dbb181909679000000000000c35000000000000000000000000000000162ffffffffffffffff00000000000002220000000000000000000000fd000601e3030000000000000000000000000000000000000000000000000000000000000001030000000000000000000000000000000000000000000000000000000000000002030000000000000000000000000000000000000000000000000000000000000003030000000000000000000000000000000000000000000000000000000000000004030059030000000000000000000000000000000000000000000000000000000000000005020900000000000000000000000000000000000000000000000000000000000000010000010210000300000000000000000000000000000000fd0300120084030000000000000000000000000000000300940022ff4f00f805273c1b203bb5ebf8436bfde57b3be8c2f5e95d9491dbb1819096793d00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000210100000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000c005e020000000100000000000000000000000000000000000000000000000000000000000000000000000000ffffffff0150c3000000000000220020ae00000000000000000000000000000000000000000000000000000000000000000000000c00000c00000c00000c00000c00000c00000c00000c00000c00000c00000c00000c000003001200430300000000000000000000000000000003005300243d0000000000000000000000000000000000000000000000000000000000000002080000000000000000000000000000000000000000000000000000000000000003000000000000000000000000000000010301320003000000000000000000000
000000000000000000000000000000000000000000703000000000000000000000000000000030142000302000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003000000000000000000000000000000030112001001000000000000000000000000000000030120001000021aaa0008aaaaaaaaaaaa9aaa01000000000000000000000000000000050103020000000000000000000000000000000000000000000000000000000000000000c3500003e800fd0301120112010000000000000000000000000000000301ff00210000000000000000000000000000000000000000000000000000000000000e05000000000000016200000000004c4b4000000000000003e800000000000003e80000000203f000050300000000000000000000000000000000000000000000000000000000000001000300000000000000000000000000000000000000000000000000000000000002000300000000000000000000000000000000000000000000000000000000000003000300000000000000000000000000000000000000000000000000000000000004000300000000000000000000000000000000000000000000000000000000000005000266000000000000000000000000000003012300000000000000000000000000000000000000010000000000000000000000000000000a00fd00fd03011200620100000000000000000000000000000003017200233a00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000007c0001000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000b03011200430100000000000000000000000000000003015300243a000000000000000000000000000000000000000000000000000000000000000267000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000003001205ac030000000000000000000000000000000300ff00803d0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003e80ff00000000000000000000000000000000000000000000000000000000000000000003f00003000000000000000000000000000000000000000000000000000000000000055511020203e80401a0060800000e00000100000a00000000000000000000000000000000000000000000000000000000000000fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300c1ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffab000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000000fd03001200640300000000000000000000000000000003007400843d000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000030010000000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003001200630300000000000000000000000000000003007300853d000000000000000000000000000000000000000000000000000000000000000900000000000000000000000000000000000000000000000000000000000000020b00000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000700fd00fd03011200640100000000000000000000000000000003017400843a00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000006a000100000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000003011200630100000000000000000000000000000003017300853a00000000000000000000000000000000000000000000000000000000000000660000000000000000000000000000000000000000000000000000000000000002640000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000030112004a0100000000000000000000000000000003015a00823a000000000000000000000000000000000000000000000000000000000000000000000000000000ff008888888888888888888888888888888888888888888888888888888888880100000000000000000000000000000003011200640100000000000000000000000000000003017400843a000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000100000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000003011200630100000000000000000000000000000003017300853a0000000000000000000000000000000000000000000000000000000000000067000000000000000000000000000000000000000000000000000000000000000265000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000003001205ac030000000000000000000000000000000300ff00803d0000000000000000000000000000000000000000000000000000000000000000000000000000010000000000003e80ff00000000000000000000000000000000000000000000000000000000000000000003f00003000000000000000000000000000000000000000000000000000000000000055511020203e80401a0060800000e00000100000a00000000000000000000000000000000000000000000000000000000000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300c1ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffab000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000000fd03001200630300000000000000000000000000000003007300853d000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000020a000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003001200640300000000000000000000000000000003007400843d0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c301000000000000000000000000000000000000000000000
0000000000000000000000300000000000000000000000000000003001200630300000000000000000000000000000003007300853d000000000000000000000000000000000000000000000000000000000000000b00000000000000000000000000000000000000000000000000000000000000020d00000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000700fd00fd03011200640100000000000000000000000000000003017400843a000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000039000100000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000003011200630100000000000000000000000000000003017300853a00000000000000000000000000000000000000000000000000000000000000640000000000000000000000000000000000000000000000000000000000000002700000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000030112002c0100000000000000000000000000000003013c00833a00000000000000000000000000000000000000000000000000000000000000000000000000000100000100000000000000000000000000000003011200640100000000000000000000000000000003017400843a000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000039000100000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000003011200630100000000000000000000000000000003017300853a000000000000000000000000000000000000000000000000000000000000006500000000000000000000000000000000000000000000000000000000000000027100000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000703001200630300000000000000000000000000000003007300853d000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000020c000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003001200640300000000000000000000000000000003007400843d00000000000000000000000000000000000000000
0000000000000000000000000000000000000000000000000000000000000000000000000000000000032010000000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003001205ac030000000000000000000000000000000300ff00803d00000000000000000000000000000000000000000000000000000000000000000000000000000200000000000b0838ff00000000000000000000000000000000000000000000000000000000000000000003f0000300000000000000000000000000000000000000000000000000000000000005551202030927c00401a0060800000e00000100000a00000000000000000000000000000000000000000000000000000000000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300c1ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff53000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000000fd03001200a4030000000000000000000000000000000300b400843d00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000007501000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000006705000000000000000000000000000000000000000000000000000000000000060300000000000000000000000000000003001200630300000000000000000000000000000003007300853d000000000000000000000000000000000000000000000000000000000000000d00000000000000000000000000000000000000000000000000000000000000020f00000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000700fd00fd0c007d02000000013a00000000000000000000000000000000000000000000000000000000000000000000000000000080025
8020000000000002200204b0000000000000000000000000000000000000000000000000000000000000014c00000000000001600142800000000000000000000000000000000000000050000200c005e0200000001730000000000000000000000000000000000000000000000000000000000000000000000000000000001a701000000000000220020b200000000000000000000000000000000000000000000000000000000000000000000000c00000c00000c00000c00000c000007").unwrap(), &(Arc::clone(&logger) as Arc<dyn Logger>));
+ super::do_test(&test, &(Arc::clone(&logger) as Arc<dyn Logger>));
let log_entries = logger.lines.lock().unwrap();
assert_eq!(log_entries.get(&("lightning::ln::peer_handler".to_string(), "Handling SendAcceptChannel event in peer_handler for node 030000000000000000000000000000000000000000000000000000000000000002 for channel ff4f00f805273c1b203bb5ebf8436bfde57b3be8c2f5e95d9491dbb181909679".to_string())), Some(&1)); // 1
assert_eq!(log_entries.get(&("lightning::ln::peer_handler".to_string(), "Handling UpdateHTLCs event in peer_handler for node 030000000000000000000000000000000000000000000000000000000000000002 with 0 adds, 0 fulfills, 1 fails for channel 3d00000000000000000000000000000000000000000000000000000000000000".to_string())), Some(&2)); // 9
assert_eq!(log_entries.get(&("lightning::chain::channelmonitor".to_string(), "Input spending counterparty commitment tx (0000000000000000000000000000000000000000000000000000000000000073:0) in 0000000000000000000000000000000000000000000000000000000000000067 resolves outbound HTLC with payment hash ff00000000000000000000000000000000000000000000000000000000000000 with timeout".to_string())), Some(&1)); // 10
}
+
+ #[test]
+ fn test_gossip_exchange_breakage() {
+ // To avoid accidentally causing all existing fuzz test cases to be useless by making minor
+ // changes (such as requesting feerate info in a new place), we exchange some gossip
+ // messages. Obviously this is pretty finicky, so this should be updated pretty liberally,
+ // but at least we'll know when changes occur.
+ // This test serves as a pretty good full_stack_target seed.
+
+ // What each byte represents is broken down below, and then everything is concatenated into
+ // one large test at the end (you want %s/ -.*//g %s/\n\| \|\t\|\///g).
+
+ // Following BOLT 8, lightning message on the wire are: 2-byte encrypted message length +
+ // 16-byte MAC of the encrypted message length + encrypted Lightning message + 16-byte MAC
+ // of the Lightning message
+ // I.e 2nd inbound read, len 18 : 0006 (encrypted message length) + 03000000000000000000000000000000 (MAC of the encrypted message length)
+ // Len 22 : 0010 00000000 (encrypted lightning message) + 03000000000000000000000000000000 (MAC of the Lightning message)
+
+ // Writing new code generating transactions and see a new failure ? Don't forget to add input for the FuzzEstimator !
+
+ let mut test = Vec::new();
+
+ // our network key
+ ext_from_hex("0100000000000000000000000000000000000000000000000000000000000000", &mut test);
+ // config
+ ext_from_hex("0000000000900000000000000000640001000000000001ffff0000000000000000ffffffffffffffffffffffffffffffff0000000000000000ffffffffffffffff000000ffffffff00ffff1a000400010000020400000000040200000a08ffffffffffffffff0001000000", &mut test);
+
+ // new outbound connection with id 0
+ ext_from_hex("00", &mut test);
+ // peer's pubkey
+ ext_from_hex("030000000000000000000000000000000000000000000000000000000000000002", &mut test);
+ // inbound read from peer id 0 of len 50
+ ext_from_hex("030032", &mut test);
+ // noise act two (0||pubkey||mac)
+ ext_from_hex("00 030000000000000000000000000000000000000000000000000000000000000002 03000000000000000000000000000000", &mut test);
+
+ // inbound read from peer id 0 of len 18
+ ext_from_hex("030012", &mut test);
+ // message header indicating message length 16
+ ext_from_hex("0010 03000000000000000000000000000000", &mut test);
+ // inbound read from peer id 0 of len 32
+ ext_from_hex("030020", &mut test);
+ // init message (type 16) with static_remotekey required, no channel_type/anchors/taproot, and other bits optional and mac
+ ext_from_hex("0010 00021aaa 0008aaa20aaa2a0a9aaa 03000000000000000000000000000000", &mut test);
+
+ // new inbound connection with id 1
+ ext_from_hex("01", &mut test);
+ // inbound read from peer id 1 of len 50
+ ext_from_hex("030132", &mut test);
+ // inbound noise act 1
+ ext_from_hex("0003000000000000000000000000000000000000000000000000000000000000000703000000000000000000000000000000", &mut test);
+ // inbound read from peer id 1 of len 66
+ ext_from_hex("030142", &mut test);
+ // inbound noise act 3
+ ext_from_hex("000302000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003000000000000000000000000000000", &mut test);
+
+ // inbound read from peer id 1 of len 18
+ ext_from_hex("030112", &mut test);
+ // message header indicating message length 16
+ ext_from_hex("0010 01000000000000000000000000000000", &mut test);
+ // inbound read from peer id 1 of len 32
+ ext_from_hex("030120", &mut test);
+ // init message (type 16) with static_remotekey required, no channel_type/anchors/taproot, and other bits optional and mac
+ ext_from_hex("0010 00021aaa 0008aaa20aaa2a0a9aaa 01000000000000000000000000000000", &mut test);
+
+ // inbound read from peer id 0 of len 18
+ ext_from_hex("030012", &mut test);
+ // message header indicating message length 432
+ ext_from_hex("01b0 03000000000000000000000000000000", &mut test);
+ // inbound read from peer id 0 of len 255
+ ext_from_hex("0300ff", &mut test);
+ // First part of channel_announcement (type 256)
+ ext_from_hex("0100 00000000000000000000000000000000000000000000000000000000000000b20303030303030303030303030303030303030303030303030303030303030303 00000000000000000000000000000000000000000000000000000000000000b20202020202020202020202020202020202020202020202020202020202020202 00000000000000000000000000000000000000000000000000000000000000b20303030303030303030303030303030303030303030303030303030303030303 00000000000000000000000000000000000000000000000000000000000000b20202020202020202020202020202020202020202020202020202020202", &mut test);
+ // inbound read from peer id 0 of len 193
+ ext_from_hex("0300c1", &mut test);
+ // Last part of channel_announcement and mac
+ ext_from_hex("020202 00006fe28c0ab6f1b372c1a6a246ae63f74f931e8365e15a089c68d6190000000000000000000000002a030303030303030303030303030303030303030303030303030303030303030303020202020202020202020202020202020202020202020202020202020202020202030303030303030303030303030303030303030303030303030303030303030303020202020202020202020202020202020202020202020202020202020202020202 03000000000000000000000000000000", &mut test);
+
+ // inbound read from peer id 0 of len 18
+ ext_from_hex("030012", &mut test);
+ // message header indicating message length 138
+ ext_from_hex("008a 03000000000000000000000000000000", &mut test);
+ // inbound read from peer id 0 of len 154
+ ext_from_hex("03009a", &mut test);
+ // channel_update (type 258) and mac
+ ext_from_hex("0102 00000000000000000000000000000000000000000000000000000000000000a60303030303030303030303030303030303030303030303030303030303030303 6fe28c0ab6f1b372c1a6a246ae63f74f931e8365e15a089c68d6190000000000 000000000000002a0000002c01000028000000000000000000000000000000000000000005f5e100 03000000000000000000000000000000", &mut test);
+
+ // inbound read from peer id 0 of len 18
+ ext_from_hex("030012", &mut test);
+ // message header indicating message length 142
+ ext_from_hex("008e 03000000000000000000000000000000", &mut test);
+ // inbound read from peer id 0 of len 158
+ ext_from_hex("03009e", &mut test);
+ // node_announcement (type 257) and mac
+ ext_from_hex("0101 00000000000000000000000000000000000000000000000000000000000000280303030303030303030303030303030303030303030303030303030303030303 00000000002b03030303030303030303030303030303030303030303030303030303030303030300000000000000000000000000000000000000000000000000000000000000000000000000 03000000000000000000000000000000", &mut test);
+
+ let logger = Arc::new(TrackingLogger { lines: Mutex::new(HashMap::new()) });
+ super::do_test(&test, &(Arc::clone(&logger) as Arc<dyn Logger>));
+
+ let log_entries = logger.lines.lock().unwrap();
+ assert_eq!(log_entries.get(&("lightning::ln::peer_handler".to_string(), "Sending message to all peers except Some(PublicKey(0000000000000000000000000000000000000000000000000000000000000002ff00000000000000000000000000000000000000000000000000000000000002)) or the announced channel's counterparties: ChannelAnnouncement { node_signature_1: 3026020200b202200303030303030303030303030303030303030303030303030303030303030303, node_signature_2: 3026020200b202200202020202020202020202020202020202020202020202020202020202020202, bitcoin_signature_1: 3026020200b202200303030303030303030303030303030303030303030303030303030303030303, bitcoin_signature_2: 3026020200b202200202020202020202020202020202020202020202020202020202020202020202, contents: UnsignedChannelAnnouncement { features: [], chain_hash: 6fe28c0ab6f1b372c1a6a246ae63f74f931e8365e15a089c68d6190000000000, short_channel_id: 42, node_id_1: NodeId(030303030303030303030303030303030303030303030303030303030303030303), node_id_2: NodeId(020202020202020202020202020202020202020202020202020202020202020202), bitcoin_key_1: NodeId(030303030303030303030303030303030303030303030303030303030303030303), bitcoin_key_2: NodeId(020202020202020202020202020202020202020202020202020202020202020202), excess_data: [] } }".to_string())), Some(&1));
+ assert_eq!(log_entries.get(&("lightning::ln::peer_handler".to_string(), "Sending message to all peers except Some(PublicKey(0000000000000000000000000000000000000000000000000000000000000002ff00000000000000000000000000000000000000000000000000000000000002)): ChannelUpdate { signature: 3026020200a602200303030303030303030303030303030303030303030303030303030303030303, contents: UnsignedChannelUpdate { chain_hash: 6fe28c0ab6f1b372c1a6a246ae63f74f931e8365e15a089c68d6190000000000, short_channel_id: 42, timestamp: 44, flags: 0, cltv_expiry_delta: 40, htlc_minimum_msat: 0, htlc_maximum_msat: 100000000, fee_base_msat: 0, fee_proportional_millionths: 0, excess_data: [] } }".to_string())), Some(&1));
+ assert_eq!(log_entries.get(&("lightning::ln::peer_handler".to_string(), "Sending message to all peers except Some(PublicKey(0000000000000000000000000000000000000000000000000000000000000002ff00000000000000000000000000000000000000000000000000000000000002)) or the announced node: NodeAnnouncement { signature: 302502012802200303030303030303030303030303030303030303030303030303030303030303, contents: UnsignedNodeAnnouncement { features: [], timestamp: 43, node_id: NodeId(030303030303030303030303030303030303030303030303030303030303030303), rgb: [0, 0, 0], alias: NodeAlias([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), addresses: [], excess_address_data: [], excess_data: [] } }".to_string())), Some(&1));
+ }
}
use lightning::util::indexed_map::{IndexedMap, self};
use std::collections::{BTreeMap, btree_map};
-use hashbrown::HashSet;
+use lightning::util::hash_tables::*;
use crate::utils::test_logger;
}
}
- let mut key_set = HashSet::with_capacity(256);
+ let mut key_set = hash_map_with_capacity(1024);
for k in indexed.unordered_keys() {
- assert!(key_set.insert(*k));
+ assert!(key_set.insert(*k, ()).is_none());
assert!(btree.contains_key(k));
}
assert_eq!(key_set.len(), btree.len());
key_set.clear();
for (k, v) in indexed.unordered_iter() {
- assert!(key_set.insert(*k));
+ assert!(key_set.insert(*k, ()).is_none());
assert_eq!(btree.get(k).unwrap(), v);
}
assert_eq!(key_set.len(), btree.len());
key_set.clear();
for (k, v) in indexed_clone.unordered_iter_mut() {
- assert!(key_set.insert(*k));
+ assert!(key_set.insert(*k, ()).is_none());
assert_eq!(btree.get(k).unwrap(), v);
}
assert_eq!(key_set.len(), btree.len());
use bitcoin::secp256k1::{KeyPair, Parity, PublicKey, Secp256k1, SecretKey, self};
use crate::utils::test_logger;
-use core::convert::{Infallible, TryFrom};
+use core::convert::TryFrom;
use lightning::blinded_path::BlindedPath;
use lightning::sign::EntropySource;
use lightning::ln::PaymentHash;
let even_pubkey = x_only_pubkey.public_key(Parity::Even);
if signing_pubkey == odd_pubkey || signing_pubkey == even_pubkey {
unsigned_invoice
- .sign::<_, Infallible>(
- |message| Ok(secp_ctx.sign_schnorr_no_aux_rand(message.as_ref().as_digest(), &keys))
+ .sign(|message: &UnsignedBolt12Invoice|
+ Ok(secp_ctx.sign_schnorr_no_aux_rand(message.as_ref().as_digest(), &keys))
)
.unwrap()
.write(&mut buffer)
.unwrap();
} else {
unsigned_invoice
- .sign::<_, Infallible>(
- |message| Ok(secp_ctx.sign_schnorr_no_aux_rand(message.as_ref().as_digest(), &keys))
+ .sign(|message: &UnsignedBolt12Invoice|
+ Ok(secp_ctx.sign_schnorr_no_aux_rand(message.as_ref().as_digest(), &keys))
)
.unwrap_err();
}
use bitcoin::secp256k1::{KeyPair, PublicKey, Secp256k1, SecretKey};
use crate::utils::test_logger;
-use core::convert::{Infallible, TryFrom};
+use core::convert::TryFrom;
use lightning::offers::invoice_request::UnsignedInvoiceRequest;
use lightning::offers::offer::{Amount, Offer, Quantity};
use lightning::offers::parse::Bolt12SemanticError;
if let Ok(invoice_request) = build_response(&offer, pubkey) {
invoice_request
- .sign::<_, Infallible>(
- |message| Ok(secp_ctx.sign_schnorr_no_aux_rand(message.as_ref().as_digest(), &keys))
+ .sign(|message: &UnsignedInvoiceRequest|
+ Ok(secp_ctx.sign_schnorr_no_aux_rand(message.as_ref().as_digest(), &keys))
)
.unwrap()
.write(&mut buffer)
use bitcoin::secp256k1::ecdsa::RecoverableSignature;
use bitcoin::secp256k1::schnorr;
-use lightning::blinded_path::BlindedPath;
+use lightning::blinded_path::{BlindedPath, EmptyNodeIdLookUp};
use lightning::ln::features::InitFeatures;
use lightning::ln::msgs::{self, DecodeError, OnionMessageHandler};
use lightning::ln::script::ShutdownScript;
node_secret: secret,
counter: AtomicU64::new(0),
};
+ let node_id_lookup = EmptyNodeIdLookUp {};
let message_router = TestMessageRouter {};
let offers_msg_handler = TestOffersMessageHandler {};
let custom_msg_handler = TestCustomMessageHandler {};
let onion_messenger = OnionMessenger::new(
- &keys_manager, &keys_manager, logger, &message_router, &offers_msg_handler,
- &custom_msg_handler
+ &keys_manager, &keys_manager, logger, &node_id_lookup, &message_router,
+ &offers_msg_handler, &custom_msg_handler
);
let peer_node_id = {
})
}
- fn create_blinded_paths<
- ES: EntropySource + ?Sized, T: secp256k1::Signing + secp256k1::Verification
- >(
- &self, _recipient: PublicKey, _peers: Vec<PublicKey>, _entropy_source: &ES,
- _secp_ctx: &Secp256k1<T>
+ fn create_blinded_paths<T: secp256k1::Signing + secp256k1::Verification>(
+ &self, _recipient: PublicKey, _peers: Vec<PublicKey>, _secp_ctx: &Secp256k1<T>,
) -> Result<Vec<BlindedPath>, ()> {
unreachable!()
}
Ok(len) => len,
Err(_) => return,
};
- buf.copy_from_slice(&get_slice!(len as usize + 16));
+ buf[..len as usize + 16].copy_from_slice(&get_slice!(len as usize + 16));
match crypter.decrypt_message(&mut buf[..len as usize + 16]) {
Ok(_) => {},
Err(_) => return,
use bitcoin::secp256k1::{KeyPair, PublicKey, Secp256k1, SecretKey, self};
use crate::utils::test_logger;
-use core::convert::{Infallible, TryFrom};
+use core::convert::TryFrom;
use lightning::blinded_path::BlindedPath;
use lightning::sign::EntropySource;
use lightning::ln::PaymentHash;
if let Ok(invoice) = build_response(&refund, pubkey, &secp_ctx) {
invoice
- .sign::<_, Infallible>(
- |message| Ok(secp_ctx.sign_schnorr_no_aux_rand(message.as_ref().as_digest(), &keys))
+ .sign(|message: &UnsignedBolt12Invoice|
+ Ok(secp_ctx.sign_schnorr_no_aux_rand(message.as_ref().as_digest(), &keys))
)
.unwrap()
.write(&mut buffer)
use bitcoin::blockdata::script::Builder;
use bitcoin::blockdata::transaction::TxOut;
-use lightning::blinded_path::{BlindedHop, BlindedPath};
+use lightning::blinded_path::{BlindedHop, BlindedPath, IntroductionNode};
use lightning::chain::transaction::OutPoint;
use lightning::ln::ChannelId;
use lightning::ln::channelmanager::{self, ChannelDetails, ChannelCounterparty};
use lightning::routing::router::{find_route, PaymentParameters, RouteHint, RouteHintHop, RouteParameters};
use lightning::routing::scoring::{ProbabilisticScorer, ProbabilisticScoringFeeParameters, ProbabilisticScoringDecayParameters};
use lightning::util::config::UserConfig;
+use lightning::util::hash_tables::*;
use lightning::util::ser::Readable;
use bitcoin::hashes::Hash;
use crate::utils::test_logger;
use std::convert::TryInto;
-use hashbrown::HashSet;
use std::sync::Arc;
use std::sync::atomic::{AtomicUsize, Ordering};
msgs::DecodeError::ShortRead => panic!("We picked the length..."),
msgs::DecodeError::Io(e) => panic!("{:?}", e),
msgs::DecodeError::UnsupportedCompression => return,
+ msgs::DecodeError::DangerousValue => return,
}
}
}}
net_graph: &net_graph,
};
- let mut node_pks = HashSet::new();
+ let mut node_pks = new_hash_map();
let mut scid = 42;
macro_rules! first_hops {
count => {
for _ in 0..count {
scid += 1;
- let rnid = node_pks.iter().skip(u16::from_be_bytes(get_slice!(2).try_into().unwrap()) as usize % node_pks.len()).next().unwrap();
+ let (rnid, _) =
+ node_pks.iter().skip(u16::from_be_bytes(get_slice!(2).try_into().unwrap()) as usize % node_pks.len()).next().unwrap();
let capacity = u64::from_be_bytes(get_slice!(8).try_into().unwrap());
$first_hops_vec.push(ChannelDetails {
channel_id: ChannelId::new_zero(),
config: None,
feerate_sat_per_1000_weight: None,
channel_shutdown_state: Some(channelmanager::ChannelShutdownState::NotShuttingDown),
+ pending_inbound_htlcs: Vec::new(),
+ pending_outbound_htlcs: Vec::new(),
});
}
Some(&$first_hops_vec[..])
let count = get_slice!(1)[0];
for _ in 0..count {
scid += 1;
- let rnid = node_pks.iter().skip(slice_to_be16(get_slice!(2))as usize % node_pks.len()).next().unwrap();
+ let (rnid, _) =
+ node_pks.iter().skip(slice_to_be16(get_slice!(2)) as usize % node_pks.len()).next().unwrap();
$last_hops.push(RouteHint(vec![RouteHintHop {
src_node_id: *rnid,
short_channel_id: scid,
($first_hops: expr, $node_pks: expr, $route_params: expr) => {
let scorer = ProbabilisticScorer::new(ProbabilisticScoringDecayParameters::default(), &net_graph, &logger);
let random_seed_bytes: [u8; 32] = [get_slice!(1)[0]; 32];
- for target in $node_pks {
+ for (target, ()) in $node_pks {
let final_value_msat = slice_to_be64(get_slice!(8));
let final_cltv_expiry_delta = slice_to_be32(get_slice!(4));
let route_params = $route_params(final_value_msat, final_cltv_expiry_delta, target);
return;
}
let msg = decode_msg_with_len16!(msgs::UnsignedNodeAnnouncement, 288);
- node_pks.insert(get_pubkey_from_node_id!(msg.node_id));
+ node_pks.insert(get_pubkey_from_node_id!(msg.node_id), ());
let _ = net_graph.update_node_from_unsigned_announcement(&msg);
},
1 => {
let msg = decode_msg_with_len16!(msgs::UnsignedChannelAnnouncement, 32+8+33*4);
- node_pks.insert(get_pubkey_from_node_id!(msg.node_id_1));
- node_pks.insert(get_pubkey_from_node_id!(msg.node_id_2));
+ node_pks.insert(get_pubkey_from_node_id!(msg.node_id_1), ());
+ node_pks.insert(get_pubkey_from_node_id!(msg.node_id_2), ());
let _ = net_graph.update_channel_from_unsigned_announcement::
<&FuzzChainSource<'_, '_, Out>>(&msg, &None);
},
2 => {
let msg = decode_msg_with_len16!(msgs::UnsignedChannelAnnouncement, 32+8+33*4);
- node_pks.insert(get_pubkey_from_node_id!(msg.node_id_1));
- node_pks.insert(get_pubkey_from_node_id!(msg.node_id_2));
+ node_pks.insert(get_pubkey_from_node_id!(msg.node_id_1), ());
+ node_pks.insert(get_pubkey_from_node_id!(msg.node_id_2), ());
let _ = net_graph.update_channel_from_unsigned_announcement(&msg, &Some(&chain_source));
},
3 => {
});
}
(payinfo, BlindedPath {
- introduction_node_id: hop.src_node_id,
+ introduction_node: IntroductionNode::NodeId(hop.src_node_id),
blinding_point: dummy_pk,
blinded_hops,
})
}).collect();
let mut features = Bolt12InvoiceFeatures::empty();
features.set_basic_mpp_optional();
- find_routes!(first_hops, vec![dummy_pk].iter(), |final_amt, _, _| {
+ find_routes!(first_hops, [(dummy_pk, ())].iter(), |final_amt, _, _| {
RouteParameters::from_payment_params_and_value(PaymentParameters::blinded(last_hops.clone())
.with_bolt12_features(features.clone()).unwrap(),
final_amt)
fn update_persisted_channel(&self, _funding_txo: OutPoint, _update: Option<&channelmonitor::ChannelMonitorUpdate>, _data: &channelmonitor::ChannelMonitor<TestChannelSigner>, _update_id: MonitorUpdateId) -> chain::ChannelMonitorUpdateStatus {
self.update_ret.lock().unwrap().clone()
}
+
+ fn archive_persisted_channel(&self, _: OutPoint) {
+ }
}
[package]
name = "lightning-background-processor"
-version = "0.0.121"
+version = "0.0.123-beta"
authors = ["Valentine Wallace <vwallace@protonmail.com>"]
license = "MIT OR Apache-2.0"
repository = "https://github.com/lightningdevkit/rust-lightning"
[dependencies]
bitcoin = { version = "0.30.2", default-features = false }
-lightning = { version = "0.0.121", path = "../lightning", default-features = false }
-lightning-rapid-gossip-sync = { version = "0.0.121", path = "../lightning-rapid-gossip-sync", default-features = false }
+lightning = { version = "0.0.123-beta", path = "../lightning", default-features = false }
+lightning-rapid-gossip-sync = { version = "0.0.123-beta", path = "../lightning-rapid-gossip-sync", default-features = false }
[dev-dependencies]
tokio = { version = "1.35", features = [ "macros", "rt", "rt-multi-thread", "sync", "time" ] }
-lightning = { version = "0.0.121", path = "../lightning", features = ["_test_utils"] }
-lightning-invoice = { version = "0.29.0", path = "../lightning-invoice" }
-lightning-persister = { version = "0.0.121", path = "../lightning-persister" }
+lightning = { version = "0.0.123-beta", path = "../lightning", features = ["_test_utils"] }
+lightning-invoice = { version = "0.31.0-beta", path = "../lightning-invoice" }
+lightning-persister = { version = "0.0.123-beta", path = "../lightning-persister" }
use lightning::chain;
use lightning::chain::chaininterface::{BroadcasterInterface, FeeEstimator};
use lightning::chain::chainmonitor::{ChainMonitor, Persist};
-use lightning::sign::{EntropySource, NodeSigner, SignerProvider};
use lightning::events::{Event, PathFailure};
#[cfg(feature = "std")]
use lightning::events::EventHandler;
#[cfg(any(feature = "std", feature = "futures"))]
use lightning::events::EventsProvider;
-use lightning::ln::channelmanager::ChannelManager;
+use lightning::ln::channelmanager::AChannelManager;
use lightning::ln::msgs::OnionMessageHandler;
use lightning::ln::peer_handler::APeerManager;
use lightning::routing::gossip::{NetworkGraph, P2PGossipSync};
use lightning::routing::utxo::UtxoLookup;
-use lightning::routing::router::Router;
use lightning::routing::scoring::{ScoreUpdate, WriteableScore};
use lightning::util::logger::Logger;
use lightning::util::persist::Persister;
/// However, as long as [`ChannelMonitor`] backups are sound, no funds besides those used for
/// unilateral chain closure fees are at risk.
///
+/// [`ChannelManager`]: lightning::ln::channelmanager::ChannelManager
+/// [`ChannelManager::timer_tick_occurred`]: lightning::ln::channelmanager::ChannelManager::timer_tick_occurred
/// [`ChannelMonitor`]: lightning::chain::channelmonitor::ChannelMonitor
/// [`Event`]: lightning::events::Event
/// [`PeerManager::timer_tick_occurred`]: lightning::ln::peer_handler::PeerManager::timer_tick_occurred
$timer_elapsed: expr, $check_slow_await: expr, $time_fetch: expr,
) => { {
log_trace!($logger, "Calling ChannelManager's timer_tick_occurred on startup");
- $channel_manager.timer_tick_occurred();
+ $channel_manager.get_cm().timer_tick_occurred();
log_trace!($logger, "Rebroadcasting monitor's pending claims on startup");
$chain_monitor.rebroadcast_pending_claims();
break;
}
- if $channel_manager.get_and_clear_needs_persistence() {
+ if $channel_manager.get_cm().get_and_clear_needs_persistence() {
log_trace!($logger, "Persisting ChannelManager...");
- $persister.persist_manager(&*$channel_manager)?;
+ $persister.persist_manager(&$channel_manager)?;
log_trace!($logger, "Done persisting ChannelManager.");
}
if $timer_elapsed(&mut last_freshness_call, FRESHNESS_TIMER) {
log_trace!($logger, "Calling ChannelManager's timer_tick_occurred");
- $channel_manager.timer_tick_occurred();
+ $channel_manager.get_cm().timer_tick_occurred();
last_freshness_call = $get_timer(FRESHNESS_TIMER);
}
if $timer_elapsed(&mut last_onion_message_handler_call, ONION_MESSAGE_HANDLER_TIMER) {
// After we exit, ensure we persist the ChannelManager one final time - this avoids
// some races where users quit while channel updates were in-flight, with
// ChannelMonitor update(s) persisted without a corresponding ChannelManager update.
- $persister.persist_manager(&*$channel_manager)?;
+ $persister.persist_manager(&$channel_manager)?;
// Persist Scorer on exit
if let Some(ref scorer) = $scorer {
/// # use std::sync::atomic::{AtomicBool, Ordering};
/// # use std::time::SystemTime;
/// # use lightning_background_processor::{process_events_async, GossipSync};
-/// # struct MyStore {}
-/// # impl lightning::util::persist::KVStore for MyStore {
+/// # struct Logger {}
+/// # impl lightning::util::logger::Logger for Logger {
+/// # fn log(&self, _record: lightning::util::logger::Record) {}
+/// # }
+/// # struct Store {}
+/// # impl lightning::util::persist::KVStore for Store {
/// # fn read(&self, primary_namespace: &str, secondary_namespace: &str, key: &str) -> io::Result<Vec<u8>> { Ok(Vec::new()) }
/// # fn write(&self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: &[u8]) -> io::Result<()> { Ok(()) }
/// # fn remove(&self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool) -> io::Result<()> { Ok(()) }
/// # fn list(&self, primary_namespace: &str, secondary_namespace: &str) -> io::Result<Vec<String>> { Ok(Vec::new()) }
/// # }
-/// # struct MyEventHandler {}
-/// # impl MyEventHandler {
+/// # struct EventHandler {}
+/// # impl EventHandler {
/// # async fn handle_event(&self, _: lightning::events::Event) {}
/// # }
/// # #[derive(Eq, PartialEq, Clone, Hash)]
-/// # struct MySocketDescriptor {}
-/// # impl lightning::ln::peer_handler::SocketDescriptor for MySocketDescriptor {
+/// # struct SocketDescriptor {}
+/// # impl lightning::ln::peer_handler::SocketDescriptor for SocketDescriptor {
/// # fn send_data(&mut self, _data: &[u8], _resume_read: bool) -> usize { 0 }
/// # fn disconnect_socket(&mut self) {}
/// # }
-/// # type MyBroadcaster = dyn lightning::chain::chaininterface::BroadcasterInterface + Send + Sync;
-/// # type MyFeeEstimator = dyn lightning::chain::chaininterface::FeeEstimator + Send + Sync;
-/// # type MyNodeSigner = dyn lightning::sign::NodeSigner + Send + Sync;
-/// # type MyUtxoLookup = dyn lightning::routing::utxo::UtxoLookup + Send + Sync;
-/// # type MyFilter = dyn lightning::chain::Filter + Send + Sync;
-/// # type MyLogger = dyn lightning::util::logger::Logger + Send + Sync;
-/// # type MyChainMonitor = lightning::chain::chainmonitor::ChainMonitor<lightning::sign::InMemorySigner, Arc<MyFilter>, Arc<MyBroadcaster>, Arc<MyFeeEstimator>, Arc<MyLogger>, Arc<MyStore>>;
-/// # type MyPeerManager = lightning::ln::peer_handler::SimpleArcPeerManager<MySocketDescriptor, MyChainMonitor, MyBroadcaster, MyFeeEstimator, Arc<MyUtxoLookup>, MyLogger>;
-/// # type MyNetworkGraph = lightning::routing::gossip::NetworkGraph<Arc<MyLogger>>;
-/// # type MyGossipSync = lightning::routing::gossip::P2PGossipSync<Arc<MyNetworkGraph>, Arc<MyUtxoLookup>, Arc<MyLogger>>;
-/// # type MyChannelManager = lightning::ln::channelmanager::SimpleArcChannelManager<MyChainMonitor, MyBroadcaster, MyFeeEstimator, MyLogger>;
-/// # type MyScorer = RwLock<lightning::routing::scoring::ProbabilisticScorer<Arc<MyNetworkGraph>, Arc<MyLogger>>>;
-///
-/// # async fn setup_background_processing(my_persister: Arc<MyStore>, my_event_handler: Arc<MyEventHandler>, my_chain_monitor: Arc<MyChainMonitor>, my_channel_manager: Arc<MyChannelManager>, my_gossip_sync: Arc<MyGossipSync>, my_logger: Arc<MyLogger>, my_scorer: Arc<MyScorer>, my_peer_manager: Arc<MyPeerManager>) {
-/// let background_persister = Arc::clone(&my_persister);
-/// let background_event_handler = Arc::clone(&my_event_handler);
-/// let background_chain_mon = Arc::clone(&my_chain_monitor);
-/// let background_chan_man = Arc::clone(&my_channel_manager);
-/// let background_gossip_sync = GossipSync::p2p(Arc::clone(&my_gossip_sync));
-/// let background_peer_man = Arc::clone(&my_peer_manager);
-/// let background_logger = Arc::clone(&my_logger);
-/// let background_scorer = Arc::clone(&my_scorer);
+/// # type ChainMonitor<B, F, FE> = lightning::chain::chainmonitor::ChainMonitor<lightning::sign::InMemorySigner, Arc<F>, Arc<B>, Arc<FE>, Arc<Logger>, Arc<Store>>;
+/// # type NetworkGraph = lightning::routing::gossip::NetworkGraph<Arc<Logger>>;
+/// # type P2PGossipSync<UL> = lightning::routing::gossip::P2PGossipSync<Arc<NetworkGraph>, Arc<UL>, Arc<Logger>>;
+/// # type ChannelManager<B, F, FE> = lightning::ln::channelmanager::SimpleArcChannelManager<ChainMonitor<B, F, FE>, B, FE, Logger>;
+/// # type Scorer = RwLock<lightning::routing::scoring::ProbabilisticScorer<Arc<NetworkGraph>, Arc<Logger>>>;
+/// # type PeerManager<B, F, FE, UL> = lightning::ln::peer_handler::SimpleArcPeerManager<SocketDescriptor, ChainMonitor<B, F, FE>, B, FE, Arc<UL>, Logger>;
+/// #
+/// # struct Node<
+/// # B: lightning::chain::chaininterface::BroadcasterInterface + Send + Sync + 'static,
+/// # F: lightning::chain::Filter + Send + Sync + 'static,
+/// # FE: lightning::chain::chaininterface::FeeEstimator + Send + Sync + 'static,
+/// # UL: lightning::routing::utxo::UtxoLookup + Send + Sync + 'static,
+/// # > {
+/// # peer_manager: Arc<PeerManager<B, F, FE, UL>>,
+/// # event_handler: Arc<EventHandler>,
+/// # channel_manager: Arc<ChannelManager<B, F, FE>>,
+/// # chain_monitor: Arc<ChainMonitor<B, F, FE>>,
+/// # gossip_sync: Arc<P2PGossipSync<UL>>,
+/// # persister: Arc<Store>,
+/// # logger: Arc<Logger>,
+/// # scorer: Arc<Scorer>,
+/// # }
+/// #
+/// # async fn setup_background_processing<
+/// # B: lightning::chain::chaininterface::BroadcasterInterface + Send + Sync + 'static,
+/// # F: lightning::chain::Filter + Send + Sync + 'static,
+/// # FE: lightning::chain::chaininterface::FeeEstimator + Send + Sync + 'static,
+/// # UL: lightning::routing::utxo::UtxoLookup + Send + Sync + 'static,
+/// # >(node: Node<B, F, FE, UL>) {
+/// let background_persister = Arc::clone(&node.persister);
+/// let background_event_handler = Arc::clone(&node.event_handler);
+/// let background_chain_mon = Arc::clone(&node.chain_monitor);
+/// let background_chan_man = Arc::clone(&node.channel_manager);
+/// let background_gossip_sync = GossipSync::p2p(Arc::clone(&node.gossip_sync));
+/// let background_peer_man = Arc::clone(&node.peer_manager);
+/// let background_logger = Arc::clone(&node.logger);
+/// let background_scorer = Arc::clone(&node.scorer);
///
/// // Setup the sleeper.
/// let (stop_sender, stop_receiver) = tokio::sync::watch::channel(());
/// sleeper,
/// mobile_interruptable_platform,
/// || Some(SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap())
-/// )
-/// .await
-/// .expect("Failed to process events");
+/// )
+/// .await
+/// .expect("Failed to process events");
/// });
///
/// // Stop the background processing.
'a,
UL: 'static + Deref + Send + Sync,
CF: 'static + Deref + Send + Sync,
- CW: 'static + Deref + Send + Sync,
T: 'static + Deref + Send + Sync,
- ES: 'static + Deref + Send + Sync,
- NS: 'static + Deref + Send + Sync,
- SP: 'static + Deref + Send + Sync,
F: 'static + Deref + Send + Sync,
- R: 'static + Deref + Send + Sync,
G: 'static + Deref<Target = NetworkGraph<L>> + Send + Sync,
L: 'static + Deref + Send + Sync,
P: 'static + Deref + Send + Sync,
EventHandlerFuture: core::future::Future<Output = ()>,
EventHandler: Fn(Event) -> EventHandlerFuture,
PS: 'static + Deref + Send,
- M: 'static + Deref<Target = ChainMonitor<<SP::Target as SignerProvider>::EcdsaSigner, CF, T, F, L, P>> + Send + Sync,
- CM: 'static + Deref<Target = ChannelManager<CW, T, ES, NS, SP, F, R, L>> + Send + Sync,
+ M: 'static + Deref<Target = ChainMonitor<<CM::Target as AChannelManager>::Signer, CF, T, F, L, P>> + Send + Sync,
+ CM: 'static + Deref + Send + Sync,
PGS: 'static + Deref<Target = P2PGossipSync<G, UL, L>> + Send + Sync,
RGS: 'static + Deref<Target = RapidGossipSync<G, L>> + Send,
PM: 'static + Deref + Send + Sync,
where
UL::Target: 'static + UtxoLookup,
CF::Target: 'static + chain::Filter,
- CW::Target: 'static + chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
T::Target: 'static + BroadcasterInterface,
- ES::Target: 'static + EntropySource,
- NS::Target: 'static + NodeSigner,
- SP::Target: 'static + SignerProvider,
F::Target: 'static + FeeEstimator,
- R::Target: 'static + Router,
L::Target: 'static + Logger,
- P::Target: 'static + Persist<<SP::Target as SignerProvider>::EcdsaSigner>,
- PS::Target: 'static + Persister<'a, CW, T, ES, NS, SP, F, R, L, SC>,
+ P::Target: 'static + Persist<<CM::Target as AChannelManager>::Signer>,
+ PS::Target: 'static + Persister<'a, CM, L, SC>,
+ CM::Target: AChannelManager + Send + Sync,
PM::Target: APeerManager + Send + Sync,
{
let mut should_break = false;
define_run_body!(
persister, chain_monitor,
chain_monitor.process_pending_events_async(async_event_handler).await,
- channel_manager, channel_manager.process_pending_events_async(async_event_handler).await,
+ channel_manager, channel_manager.get_cm().process_pending_events_async(async_event_handler).await,
peer_manager, process_onion_message_handler_events_async(&peer_manager, async_event_handler).await,
gossip_sync, logger, scorer, should_break, {
let fut = Selector {
- a: channel_manager.get_event_or_persistence_needed_future(),
+ a: channel_manager.get_cm().get_event_or_persistence_needed_future(),
b: chain_monitor.get_update_future(),
c: sleeper(if mobile_interruptable_platform { Duration::from_millis(100) } else { Duration::from_secs(FASTEST_TIMER) }),
};
'a,
UL: 'static + Deref + Send + Sync,
CF: 'static + Deref + Send + Sync,
- CW: 'static + Deref + Send + Sync,
T: 'static + Deref + Send + Sync,
- ES: 'static + Deref + Send + Sync,
- NS: 'static + Deref + Send + Sync,
- SP: 'static + Deref + Send + Sync,
F: 'static + Deref + Send + Sync,
- R: 'static + Deref + Send + Sync,
G: 'static + Deref<Target = NetworkGraph<L>> + Send + Sync,
L: 'static + Deref + Send + Sync,
P: 'static + Deref + Send + Sync,
EH: 'static + EventHandler + Send,
PS: 'static + Deref + Send,
- M: 'static + Deref<Target = ChainMonitor<<SP::Target as SignerProvider>::EcdsaSigner, CF, T, F, L, P>> + Send + Sync,
- CM: 'static + Deref<Target = ChannelManager<CW, T, ES, NS, SP, F, R, L>> + Send + Sync,
+ M: 'static + Deref<Target = ChainMonitor<<CM::Target as AChannelManager>::Signer, CF, T, F, L, P>> + Send + Sync,
+ CM: 'static + Deref + Send + Sync,
PGS: 'static + Deref<Target = P2PGossipSync<G, UL, L>> + Send + Sync,
RGS: 'static + Deref<Target = RapidGossipSync<G, L>> + Send,
PM: 'static + Deref + Send + Sync,
where
UL::Target: 'static + UtxoLookup,
CF::Target: 'static + chain::Filter,
- CW::Target: 'static + chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
T::Target: 'static + BroadcasterInterface,
- ES::Target: 'static + EntropySource,
- NS::Target: 'static + NodeSigner,
- SP::Target: 'static + SignerProvider,
F::Target: 'static + FeeEstimator,
- R::Target: 'static + Router,
L::Target: 'static + Logger,
- P::Target: 'static + Persist<<SP::Target as SignerProvider>::EcdsaSigner>,
- PS::Target: 'static + Persister<'a, CW, T, ES, NS, SP, F, R, L, SC>,
+ P::Target: 'static + Persist<<CM::Target as AChannelManager>::Signer>,
+ PS::Target: 'static + Persister<'a, CM, L, SC>,
+ CM::Target: AChannelManager + Send + Sync,
PM::Target: APeerManager + Send + Sync,
{
let stop_thread = Arc::new(AtomicBool::new(false));
};
define_run_body!(
persister, chain_monitor, chain_monitor.process_pending_events(&event_handler),
- channel_manager, channel_manager.process_pending_events(&event_handler),
+ channel_manager, channel_manager.get_cm().process_pending_events(&event_handler),
peer_manager,
peer_manager.onion_message_handler().process_pending_events(&event_handler),
gossip_sync, logger, scorer, stop_thread.load(Ordering::Acquire),
{ Sleeper::from_two_futures(
- channel_manager.get_event_or_persistence_needed_future(),
- chain_monitor.get_update_future()
+ &channel_manager.get_cm().get_event_or_persistence_needed_future(),
+ &chain_monitor.get_update_future()
).wait_timeout(Duration::from_millis(100)); },
|_| Instant::now(), |time: &Instant, dur| time.elapsed().as_secs() > dur, false,
|| {
#[cfg(all(feature = "std", test))]
mod tests {
+ use bitcoin::{ScriptBuf, Txid};
use bitcoin::blockdata::constants::{genesis_block, ChainHash};
use bitcoin::blockdata::locktime::absolute::LockTime;
use bitcoin::blockdata::transaction::{Transaction, TxOut};
+ use bitcoin::hashes::Hash;
use bitcoin::network::constants::Network;
use bitcoin::secp256k1::{SecretKey, PublicKey, Secp256k1};
- use lightning::chain::{BestBlock, Confirm, chainmonitor};
+ use lightning::chain::{BestBlock, Confirm, chainmonitor, Filter};
use lightning::chain::channelmonitor::ANTI_REORG_DELAY;
- use lightning::sign::{InMemorySigner, KeysManager};
+ use lightning::sign::{InMemorySigner, KeysManager, ChangeDestinationSource};
use lightning::chain::transaction::OutPoint;
use lightning::events::{Event, PathFailure, MessageSendEventsProvider, MessageSendEvent};
use lightning::{get_event_msg, get_event};
- use lightning::ln::PaymentHash;
+ use lightning::ln::{PaymentHash, ChannelId};
use lightning::ln::channelmanager;
use lightning::ln::channelmanager::{BREAKDOWN_TIMEOUT, ChainParameters, MIN_CLTV_EXPIRY_DELTA, PaymentId};
use lightning::ln::features::{ChannelFeatures, NodeFeatures};
CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE, CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE, CHANNEL_MANAGER_PERSISTENCE_KEY,
NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE, NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE, NETWORK_GRAPH_PERSISTENCE_KEY,
SCORER_PERSISTENCE_PRIMARY_NAMESPACE, SCORER_PERSISTENCE_SECONDARY_NAMESPACE, SCORER_PERSISTENCE_KEY};
+ use lightning::util::sweep::{OutputSweeper, OutputSpendStatus};
use lightning_persister::fs_store::FilesystemStore;
use std::collections::VecDeque;
use std::{fs, env};
Arc<DefaultRouter<
Arc<NetworkGraph<Arc<test_utils::TestLogger>>>,
Arc<test_utils::TestLogger>,
+ Arc<KeysManager>,
Arc<LockingWrapper<TestScorer>>,
(),
TestScorer>
logger: Arc<test_utils::TestLogger>,
best_block: BestBlock,
scorer: Arc<LockingWrapper<TestScorer>>,
+ sweeper: Arc<OutputSweeper<Arc<test_utils::TestBroadcaster>, Arc<TestWallet>,
+ Arc<test_utils::TestFeeEstimator>, Arc<dyn Filter + Sync + Send>, Arc<FilesystemStore>,
+ Arc<test_utils::TestLogger>, Arc<KeysManager>>>,
}
impl Node {
}
}
+ struct TestWallet {}
+
+ impl ChangeDestinationSource for TestWallet {
+ fn get_change_destination_script(&self) -> Result<ScriptBuf, ()> {
+ Ok(ScriptBuf::new())
+ }
+ }
+
fn get_full_filepath(filepath: String, filename: String) -> String {
let mut path = PathBuf::from(filepath);
path.push(filename);
let genesis_block = genesis_block(network);
let network_graph = Arc::new(NetworkGraph::new(network, logger.clone()));
let scorer = Arc::new(LockingWrapper::new(TestScorer::new()));
+ let now = Duration::from_secs(genesis_block.header.time as u64);
let seed = [i as u8; 32];
- let router = Arc::new(DefaultRouter::new(network_graph.clone(), logger.clone(), seed, scorer.clone(), Default::default()));
+ let keys_manager = Arc::new(KeysManager::new(&seed, now.as_secs(), now.subsec_nanos()));
+ let router = Arc::new(DefaultRouter::new(network_graph.clone(), logger.clone(), Arc::clone(&keys_manager), scorer.clone(), Default::default()));
let chain_source = Arc::new(test_utils::TestChainSource::new(Network::Bitcoin));
let kv_store = Arc::new(FilesystemStore::new(format!("{}_persister_{}", &persist_dir, i).into()));
let now = Duration::from_secs(genesis_block.header.time as u64);
let best_block = BestBlock::from_network(network);
let params = ChainParameters { network, best_block };
let manager = Arc::new(ChannelManager::new(fee_estimator.clone(), chain_monitor.clone(), tx_broadcaster.clone(), router.clone(), logger.clone(), keys_manager.clone(), keys_manager.clone(), keys_manager.clone(), UserConfig::default(), params, genesis_block.header.time));
+ let wallet = Arc::new(TestWallet {});
+ let sweeper = Arc::new(OutputSweeper::new(best_block, Arc::clone(&tx_broadcaster), Arc::clone(&fee_estimator),
+ None::<Arc<dyn Filter + Sync + Send>>, Arc::clone(&keys_manager), wallet, Arc::clone(&kv_store), Arc::clone(&logger)));
let p2p_gossip_sync = Arc::new(P2PGossipSync::new(network_graph.clone(), Some(chain_source.clone()), logger.clone()));
let rapid_gossip_sync = Arc::new(RapidGossipSync::new(network_graph.clone(), logger.clone()));
let msg_handler = MessageHandler {
onion_message_handler: IgnoringMessageHandler{}, custom_message_handler: IgnoringMessageHandler{}
};
let peer_manager = Arc::new(PeerManager::new(msg_handler, 0, &seed, logger.clone(), keys_manager.clone()));
- let node = Node { node: manager, p2p_gossip_sync, rapid_gossip_sync, peer_manager, chain_monitor, kv_store, tx_broadcaster, network_graph, logger, best_block, scorer };
+ let node = Node { node: manager, p2p_gossip_sync, rapid_gossip_sync, peer_manager, chain_monitor, kv_store, tx_broadcaster, network_graph, logger, best_block, scorer, sweeper };
nodes.push(node);
}
fn confirm_transaction_depth(node: &mut Node, tx: &Transaction, depth: u32) {
for i in 1..=depth {
- let prev_blockhash = node.best_block.block_hash();
- let height = node.best_block.height() + 1;
+ let prev_blockhash = node.best_block.block_hash;
+ let height = node.best_block.height + 1;
let header = create_dummy_header(prev_blockhash, height);
let txdata = vec![(0, tx)];
node.best_block = BestBlock::new(header.block_hash(), height);
1 => {
node.node.transactions_confirmed(&header, &txdata, height);
node.chain_monitor.transactions_confirmed(&header, &txdata, height);
+ node.sweeper.transactions_confirmed(&header, &txdata, height);
},
x if x == depth => {
+ // We need the TestBroadcaster to know about the new height so that it doesn't think
+ // we're violating the time lock requirements of transactions broadcasted at that
+ // point.
+ node.tx_broadcaster.blocks.lock().unwrap().push((genesis_block(Network::Bitcoin), height));
node.node.best_block_updated(&header, height);
node.chain_monitor.best_block_updated(&header, height);
+ node.sweeper.best_block_updated(&header, height);
},
_ => {},
}
}
}
+
+ fn advance_chain(node: &mut Node, num_blocks: u32) {
+ for i in 1..=num_blocks {
+ let prev_blockhash = node.best_block.block_hash;
+ let height = node.best_block.height + 1;
+ let header = create_dummy_header(prev_blockhash, height);
+ node.best_block = BestBlock::new(header.block_hash(), height);
+ if i == num_blocks {
+ // We need the TestBroadcaster to know about the new height so that it doesn't think
+ // we're violating the time lock requirements of transactions broadcasted at that
+ // point.
+ node.tx_broadcaster.blocks.lock().unwrap().push((genesis_block(Network::Bitcoin), height));
+ node.node.best_block_updated(&header, height);
+ node.chain_monitor.best_block_updated(&header, height);
+ node.sweeper.best_block_updated(&header, height);
+ }
+ }
+ }
+
fn confirm_transaction(node: &mut Node, tx: &Transaction) {
confirm_transaction_depth(node, tx, ANTI_REORG_DELAY);
}
}
// Force-close the channel.
- nodes[0].node.force_close_broadcasting_latest_txn(&OutPoint { txid: tx.txid(), index: 0 }.to_channel_id(), &nodes[1].node.get_our_node_id()).unwrap();
+ nodes[0].node.force_close_broadcasting_latest_txn(&ChannelId::v1_from_funding_outpoint(OutPoint { txid: tx.txid(), index: 0 }), &nodes[1].node.get_our_node_id()).unwrap();
// Check that the force-close updates are persisted.
check_persisted_data!(nodes[0].node, filepath.clone());
let _as_channel_update = get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id());
nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &as_funding);
let _bs_channel_update = get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id());
+ let broadcast_funding = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().pop().unwrap();
+ assert_eq!(broadcast_funding.txid(), funding_tx.txid());
+ assert!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty());
if !std::thread::panicking() {
bg_processor.stop().unwrap();
.recv_timeout(Duration::from_secs(EVENT_DEADLINE))
.expect("Events not handled within deadline");
match event {
- Event::SpendableOutputs { .. } => {},
+ Event::SpendableOutputs { outputs, channel_id } => {
+ nodes[0].sweeper.track_spendable_outputs(outputs, channel_id, false, Some(153)).unwrap();
+ },
_ => panic!("Unexpected event: {:?}", event),
}
+ // Check we don't generate an initial sweeping tx until we reach the required height.
+ assert_eq!(nodes[0].sweeper.tracked_spendable_outputs().len(), 1);
+ let tracked_output = nodes[0].sweeper.tracked_spendable_outputs().first().unwrap().clone();
+ if let Some(sweep_tx_0) = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().pop() {
+ assert!(!tracked_output.is_spent_in(&sweep_tx_0));
+ match tracked_output.status {
+ OutputSpendStatus::PendingInitialBroadcast { delayed_until_height } => {
+ assert_eq!(delayed_until_height, Some(153));
+ }
+ _ => panic!("Unexpected status"),
+ }
+ }
+
+ advance_chain(&mut nodes[0], 3);
+
+ // Check we generate an initial sweeping tx.
+ assert_eq!(nodes[0].sweeper.tracked_spendable_outputs().len(), 1);
+ let tracked_output = nodes[0].sweeper.tracked_spendable_outputs().first().unwrap().clone();
+ let sweep_tx_0 = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().pop().unwrap();
+ match tracked_output.status {
+ OutputSpendStatus::PendingFirstConfirmation { latest_spending_tx, .. } => {
+ assert_eq!(sweep_tx_0.txid(), latest_spending_tx.txid());
+ }
+ _ => panic!("Unexpected status"),
+ }
+
+ // Check we regenerate and rebroadcast the sweeping tx each block.
+ advance_chain(&mut nodes[0], 1);
+ assert_eq!(nodes[0].sweeper.tracked_spendable_outputs().len(), 1);
+ let tracked_output = nodes[0].sweeper.tracked_spendable_outputs().first().unwrap().clone();
+ let sweep_tx_1 = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().pop().unwrap();
+ match tracked_output.status {
+ OutputSpendStatus::PendingFirstConfirmation { latest_spending_tx, .. } => {
+ assert_eq!(sweep_tx_1.txid(), latest_spending_tx.txid());
+ }
+ _ => panic!("Unexpected status"),
+ }
+ assert_ne!(sweep_tx_0, sweep_tx_1);
+
+ advance_chain(&mut nodes[0], 1);
+ assert_eq!(nodes[0].sweeper.tracked_spendable_outputs().len(), 1);
+ let tracked_output = nodes[0].sweeper.tracked_spendable_outputs().first().unwrap().clone();
+ let sweep_tx_2 = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().pop().unwrap();
+ match tracked_output.status {
+ OutputSpendStatus::PendingFirstConfirmation { latest_spending_tx, .. } => {
+ assert_eq!(sweep_tx_2.txid(), latest_spending_tx.txid());
+ }
+ _ => panic!("Unexpected status"),
+ }
+ assert_ne!(sweep_tx_0, sweep_tx_2);
+ assert_ne!(sweep_tx_1, sweep_tx_2);
+
+ // Check we still track the spendable outputs up to ANTI_REORG_DELAY confirmations.
+ confirm_transaction_depth(&mut nodes[0], &sweep_tx_2, 5);
+ assert_eq!(nodes[0].sweeper.tracked_spendable_outputs().len(), 1);
+ let tracked_output = nodes[0].sweeper.tracked_spendable_outputs().first().unwrap().clone();
+ match tracked_output.status {
+ OutputSpendStatus::PendingThresholdConfirmations { latest_spending_tx, .. } => {
+ assert_eq!(sweep_tx_2.txid(), latest_spending_tx.txid());
+ }
+ _ => panic!("Unexpected status"),
+ }
+
+ // Check we still see the transaction as confirmed if we unconfirm any untracked
+ // transaction. (We previously had a bug that would mark tracked transactions as
+ // unconfirmed if any transaction at an unknown block height would be unconfirmed.)
+ let unconf_txid = Txid::from_slice(&[0; 32]).unwrap();
+ nodes[0].sweeper.transaction_unconfirmed(&unconf_txid);
+
+ assert_eq!(nodes[0].sweeper.tracked_spendable_outputs().len(), 1);
+ let tracked_output = nodes[0].sweeper.tracked_spendable_outputs().first().unwrap().clone();
+ match tracked_output.status {
+ OutputSpendStatus::PendingThresholdConfirmations { latest_spending_tx, .. } => {
+ assert_eq!(sweep_tx_2.txid(), latest_spending_tx.txid());
+ }
+ _ => panic!("Unexpected status"),
+ }
+
+ // Check we stop tracking the spendable outputs when one of the txs reaches
+ // ANTI_REORG_DELAY confirmations.
+ confirm_transaction_depth(&mut nodes[0], &sweep_tx_0, ANTI_REORG_DELAY);
+ assert_eq!(nodes[0].sweeper.tracked_spendable_outputs().len(), 0);
+
if !std::thread::panicking() {
bg_processor.stop().unwrap();
}
[package]
name = "lightning-block-sync"
-version = "0.0.121"
+version = "0.0.123-beta"
authors = ["Jeffrey Czyz", "Matt Corallo"]
license = "MIT OR Apache-2.0"
repository = "https://github.com/lightningdevkit/rust-lightning"
[dependencies]
bitcoin = "0.30.2"
hex = { package = "hex-conservative", version = "0.1.1", default-features = false }
-lightning = { version = "0.0.121", path = "../lightning" }
+lightning = { version = "0.0.123-beta", path = "../lightning" }
tokio = { version = "1.35", features = [ "io-util", "net", "time", "rt" ], optional = true }
serde_json = { version = "1.0", optional = true }
chunked_transfer = { version = "1.4", optional = true }
[dev-dependencies]
-lightning = { version = "0.0.121", path = "../lightning", features = ["_test_utils"] }
+lightning = { version = "0.0.123-beta", path = "../lightning", features = ["_test_utils"] }
tokio = { version = "1.35", features = [ "macros", "rt" ] }
HttpMessageLength::Empty => { Vec::new() },
HttpMessageLength::ContentLength(length) => {
if length == 0 || length > MAX_HTTP_MESSAGE_BODY_SIZE {
- return Err(std::io::Error::new(std::io::ErrorKind::InvalidData, "out of range"))
+ return Err(std::io::Error::new(std::io::ErrorKind::InvalidData, format!("invalid response length: {} bytes", length)));
} else {
let mut content = vec![0; length];
#[cfg(feature = "tokio")]
match client.get::<BinaryResponse>("/foo", "foo.com").await {
Err(e) => {
assert_eq!(e.kind(), std::io::ErrorKind::InvalidData);
- assert_eq!(e.get_ref().unwrap().to_string(), "out of range");
+ assert_eq!(e.get_ref().unwrap().to_string(), "invalid response length: 8032001 bytes");
},
Ok(_) => panic!("Expected error"),
}
use crate::test_utils::{Blockchain, MockChainListener};
use super::*;
- use bitcoin::network::constants::Network;
-
#[tokio::test]
async fn sync_from_same_chain() {
let chain = Blockchain::default().with_height(4);
[package]
name = "lightning-custom-message"
-version = "0.0.121"
+version = "0.0.123-beta"
authors = ["Jeffrey Czyz"]
license = "MIT OR Apache-2.0"
repository = "https://github.com/lightningdevkit/rust-lightning"
[dependencies]
bitcoin = "0.30.2"
-lightning = { version = "0.0.121", path = "../lightning" }
+lightning = { version = "0.0.123-beta", path = "../lightning" }
[package]
name = "lightning-invoice"
description = "Data structures to parse and serialize BOLT11 lightning invoices"
-version = "0.29.0"
+version = "0.31.0-beta"
authors = ["Sebastian Geisler <sgeisler@wh2.tu-dresden.de>"]
documentation = "https://docs.rs/lightning-invoice/"
license = "MIT OR Apache-2.0"
[features]
default = ["std"]
-no-std = ["hashbrown", "lightning/no-std"]
-std = ["bitcoin/std", "num-traits/std", "lightning/std", "bech32/std"]
+no-std = ["lightning/no-std"]
+std = ["bitcoin/std", "lightning/std", "bech32/std"]
[dependencies]
bech32 = { version = "0.9.0", default-features = false }
-lightning = { version = "0.0.121", path = "../lightning", default-features = false }
+lightning = { version = "0.0.123-beta", path = "../lightning", default-features = false }
secp256k1 = { version = "0.27.0", default-features = false, features = ["recovery", "alloc"] }
-num-traits = { version = "0.2.8", default-features = false }
-hashbrown = { version = "0.8", optional = true }
serde = { version = "1.0.118", optional = true }
bitcoin = { version = "0.30.2", default-features = false }
[dev-dependencies]
-lightning = { version = "0.0.121", path = "../lightning", default-features = false, features = ["_test_utils"] }
+lightning = { version = "0.0.123-beta", path = "../lightning", default-features = false, features = ["_test_utils"] }
hex = { package = "hex-conservative", version = "0.1.1", default-features = false }
serde_json = { version = "1"}
+hashbrown = { version = "0.13", default-features = false }
#[cfg(feature = "std")]
use std::error;
+#[cfg(not(feature = "std"))]
use core::convert::TryFrom;
use core::fmt;
use core::fmt::{Display, Formatter};
use lightning::routing::gossip::RoutingFees;
use lightning::routing::router::{RouteHint, RouteHintHop};
-use num_traits::{CheckedAdd, CheckedMul};
-
use secp256k1::ecdsa::{RecoveryId, RecoverableSignature};
use secp256k1::PublicKey;
if b32.len() != 7 {
return Err(Bolt11ParseError::InvalidSliceLength("PositiveTimestamp::from_base32()".into()));
}
- let timestamp: u64 = parse_int_be(b32, 32)
+ let timestamp: u64 = parse_u64_be(b32)
.expect("7*5bit < 64bit, no overflow possible");
match PositiveTimestamp::from_unix_timestamp(timestamp) {
Ok(t) => Ok(t),
}
}
-pub(crate) fn parse_int_be<T, U>(digits: &[U], base: T) -> Option<T>
- where T: CheckedAdd + CheckedMul + From<u8> + Default,
- U: Into<u8> + Copy
-{
- digits.iter().fold(Some(Default::default()), |acc, b|
- acc
- .and_then(|x| x.checked_mul(&base))
- .and_then(|x| x.checked_add(&(Into::<u8>::into(*b)).into()))
- )
-}
+macro_rules! define_parse_int_be { ($name: ident, $ty: ty) => {
+ fn $name(digits: &[u5]) -> Option<$ty> {
+ digits.iter().fold(Some(Default::default()), |acc, b|
+ acc
+ .and_then(|x| x.checked_mul(32))
+ .and_then(|x| x.checked_add((Into::<u8>::into(*b)).into()))
+ )
+ }
+} }
+define_parse_int_be!(parse_u16_be, u16);
+define_parse_int_be!(parse_u64_be, u64);
fn parse_tagged_parts(data: &[u5]) -> Result<Vec<RawTaggedField>, Bolt11ParseError> {
let mut parts = Vec::<RawTaggedField>::new();
// Ignore tag at data[0], it will be handled in the TaggedField parsers and
// parse the length to find the end of the tagged field's data
- let len = parse_int_be(&data[1..3], 32).expect("can't overflow");
+ let len = parse_u16_be(&data[1..3]).expect("can't overflow") as usize;
let last_element = 3 + len;
if data.len() < last_element {
type Err = Bolt11ParseError;
fn from_base32(field_data: &[u5]) -> Result<ExpiryTime, Bolt11ParseError> {
- match parse_int_be::<u64, u5>(field_data, 32)
+ match parse_u64_be(field_data)
.map(ExpiryTime::from_seconds)
{
Some(t) => Ok(t),
type Err = Bolt11ParseError;
fn from_base32(field_data: &[u5]) -> Result<MinFinalCltvExpiryDelta, Bolt11ParseError> {
- let expiry = parse_int_be::<u64, u5>(field_data, 32);
+ let expiry = parse_u64_be(field_data);
if let Some(expiry) = expiry {
Ok(MinFinalCltvExpiryDelta(expiry))
} else {
let hop = RouteHintHop {
src_node_id: PublicKey::from_slice(&hop_bytes[0..33])?,
- short_channel_id: parse_int_be(&channel_id, 256).expect("short chan ID slice too big?"),
+ short_channel_id: u64::from_be_bytes(channel_id),
fees: RoutingFees {
- base_msat: parse_int_be(&hop_bytes[41..45], 256).expect("slice too big?"),
- proportional_millionths: parse_int_be(&hop_bytes[45..49], 256).expect("slice too big?"),
+ base_msat: u32::from_be_bytes(hop_bytes[41..45].try_into().expect("slice too big?")),
+ proportional_millionths: u32::from_be_bytes(hop_bytes[45..49].try_into().expect("slice too big?")),
},
- cltv_expiry_delta: parse_int_be(&hop_bytes[49..51], 256).expect("slice too big?"),
+ cltv_expiry_delta: u16::from_be_bytes(hop_bytes[49..51].try_into().expect("slice too big?")),
htlc_minimum_msat: None,
htlc_maximum_msat: None,
};
#[test]
fn test_parse_int_from_bytes_be() {
- use crate::de::parse_int_be;
-
- assert_eq!(parse_int_be::<u32, u8>(&[1, 2, 3, 4], 256), Some(16909060));
- assert_eq!(parse_int_be::<u32, u8>(&[1, 3], 32), Some(35));
- assert_eq!(parse_int_be::<u32, u8>(&[255, 255, 255, 255], 256), Some(4294967295));
- assert_eq!(parse_int_be::<u32, u8>(&[1, 0, 0, 0, 0], 256), None);
+ use crate::de::parse_u16_be;
+
+ assert_eq!(parse_u16_be(&[
+ u5::try_from_u8(1).unwrap(), u5::try_from_u8(2).unwrap(),
+ u5::try_from_u8(3).unwrap(), u5::try_from_u8(4).unwrap()]
+ ), Some(34916));
+ assert_eq!(parse_u16_be(&[
+ u5::try_from_u8(2).unwrap(), u5::try_from_u8(0).unwrap(),
+ u5::try_from_u8(0).unwrap(), u5::try_from_u8(0).unwrap()]
+ ), None);
}
#[test]
use lightning::routing::router::{RouteHint, RouteHintHop};
use crate::PrivateRoute;
use bech32::FromBase32;
- use crate::de::parse_int_be;
let input = from_bech32(
"q20q82gphp2nflc7jtzrcazrra7wwgzxqc8u7754cdlpfrmccae92qgzqvzq2ps8pqqqqqqpqqqqq9qqqvpeuqa\
0x7e, 0x14, 0x8f, 0x78, 0xc7, 0x72, 0x55
][..]
).unwrap(),
- short_channel_id: parse_int_be(&[0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08], 256).expect("short chan ID slice too big?"),
+ short_channel_id: 0x0102030405060708,
fees: RoutingFees {
base_msat: 1,
proportional_millionths: 20,
0x7e, 0x14, 0x8f, 0x78, 0xc7, 0x72, 0x55
][..]
).unwrap(),
- short_channel_id: parse_int_be(&[0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a], 256).expect("short chan ID slice too big?"),
+ short_channel_id: 0x030405060708090a,
fees: RoutingFees {
base_msat: 2,
proportional_millionths: 30,
extern crate bech32;
#[macro_use] extern crate lightning;
-extern crate num_traits;
extern crate secp256k1;
extern crate alloc;
#[cfg(any(test, feature = "std"))]
#[allow(unused_imports)]
mod prelude {
- #[cfg(feature = "hashbrown")]
- extern crate hashbrown;
-
pub use alloc::{vec, vec::Vec, string::String};
- #[cfg(not(feature = "hashbrown"))]
- pub use std::collections::{HashMap, hash_map};
- #[cfg(feature = "hashbrown")]
- pub use self::hashbrown::{HashMap, HashSet, hash_map};
pub use alloc::string::ToString;
}
amount: None,
si_prefix: None,
timestamp: None,
- tagged_fields: Vec::new(),
+ tagged_fields: Vec::with_capacity(8),
error: None,
phantom_d: core::marker::PhantomData,
self.signed_invoice.recover_payee_pub_key().expect("was checked by constructor").0
}
+ /// Recover the payee's public key if one was included in the invoice, otherwise return the
+ /// recovered public key from the signature
+ pub fn get_payee_pub_key(&self) -> PublicKey {
+ match self.payee_pub_key() {
+ Some(pk) => *pk,
+ None => self.recover_payee_pub_key()
+ }
+ }
+
/// Returns the Duration since the Unix epoch at which the invoice expires.
/// Returning None if overflow occurred.
pub fn expires_at(&self) -> Option<Duration> {
let route_1 = RouteHint(vec![
RouteHintHop {
src_node_id: public_key,
- short_channel_id: de::parse_int_be(&[123; 8], 256).expect("short chan ID slice too big?"),
+ short_channel_id: u64::from_be_bytes([123; 8]),
fees: RoutingFees {
base_msat: 2,
proportional_millionths: 1,
},
RouteHintHop {
src_node_id: public_key,
- short_channel_id: de::parse_int_be(&[42; 8], 256).expect("short chan ID slice too big?"),
+ short_channel_id: u64::from_be_bytes([42; 8]),
fees: RoutingFees {
base_msat: 3,
proportional_millionths: 2,
},
RouteHintHop {
src_node_id: public_key,
- short_channel_id: de::parse_int_be(&[1; 8], 256).expect("short chan ID slice too big?"),
+ short_channel_id: u64::from_be_bytes([1; 8]),
fees: RoutingFees {
base_msat: 5,
proportional_millionths: 4,
use lightning::routing::router::{RouteHint, RouteHintHop, Router};
use lightning::util::logger::{Logger, Record};
use secp256k1::PublicKey;
+use alloc::collections::{btree_map, BTreeMap};
use core::ops::Deref;
use core::time::Duration;
+#[cfg(not(feature = "std"))]
use core::iter::Iterator;
/// Utility to create an invoice that can be paid to one of multiple nodes, or a "phantom invoice."
where
L::Target: Logger,
{
- let mut filtered_channels: HashMap<PublicKey, ChannelDetails> = HashMap::new();
+ let mut filtered_channels: BTreeMap<PublicKey, ChannelDetails> = BTreeMap::new();
let min_inbound_capacity = min_inbound_capacity_msat.unwrap_or(0);
let mut min_capacity_channel_exists = false;
let mut online_channel_exists = false;
}
match filtered_channels.entry(channel.counterparty.node_id) {
- hash_map::Entry::Occupied(mut entry) => {
+ btree_map::Entry::Occupied(mut entry) => {
let current_max_capacity = entry.get().inbound_capacity_msat;
// If this channel is public and the previous channel is not, ensure we replace the
// previous channel to avoid announcing non-public channels.
channel.inbound_capacity_msat);
}
}
- hash_map::Entry::Vacant(entry) => {
+ btree_map::Entry::Vacant(entry) => {
entry.insert(channel);
}
}
use bitcoin::{PubkeyHash, ScriptHash};
use bitcoin::hashes::hex::FromHex;
use bitcoin::hashes::{sha256, Hash};
-use lightning::ln::PaymentSecret;
-use lightning::routing::gossip::RoutingFees;
-use lightning::routing::router::{RouteHint, RouteHintHop};
use lightning_invoice::*;
use secp256k1::PublicKey;
use secp256k1::ecdsa::{RecoverableSignature, RecoveryId};
[package]
name = "lightning-net-tokio"
-version = "0.0.121"
+version = "0.0.123-beta"
authors = ["Matt Corallo"]
license = "MIT OR Apache-2.0"
repository = "https://github.com/lightningdevkit/rust-lightning/"
[dependencies]
bitcoin = "0.30.2"
-lightning = { version = "0.0.121", path = "../lightning" }
+lightning = { version = "0.0.123-beta", path = "../lightning" }
tokio = { version = "1.35", features = [ "rt", "sync", "net", "time" ] }
[dev-dependencies]
tokio = { version = "1.35", features = [ "macros", "rt", "rt-multi-thread", "sync", "net", "time" ] }
-lightning = { version = "0.0.121", path = "../lightning", features = ["_test_utils"] }
+lightning = { version = "0.0.123-beta", path = "../lightning", features = ["_test_utils"] }
break Disconnect::CloseConnection;
}
},
- SelectorOutput::B(_) => {},
+ SelectorOutput::B(some) => {
+ // The mpsc Receiver should only return `None` if the write side has been
+ // dropped, but that shouldn't be possible since its referenced by the Self in
+ // `us`.
+ debug_assert!(some.is_some());
+ },
SelectorOutput::C(res) => {
if res.is_err() { break Disconnect::PeerDisconnected; }
match reader.try_read(&mut buf) {
use lightning::ln::features::*;
use lightning::ln::msgs::*;
use lightning::ln::peer_handler::{MessageHandler, PeerManager};
- use lightning::ln::features::NodeFeatures;
use lightning::routing::gossip::NodeId;
use lightning::events::*;
use lightning::util::test_utils::TestNodeSigner;
fn handle_open_channel_v2(&self, _their_node_id: &PublicKey, _msg: &OpenChannelV2) {}
fn handle_accept_channel_v2(&self, _their_node_id: &PublicKey, _msg: &AcceptChannelV2) {}
fn handle_stfu(&self, _their_node_id: &PublicKey, _msg: &Stfu) {}
+ #[cfg(splicing)]
fn handle_splice(&self, _their_node_id: &PublicKey, _msg: &Splice) {}
+ #[cfg(splicing)]
fn handle_splice_ack(&self, _their_node_id: &PublicKey, _msg: &SpliceAck) {}
+ #[cfg(splicing)]
fn handle_splice_locked(&self, _their_node_id: &PublicKey, _msg: &SpliceLocked) {}
fn handle_tx_add_input(&self, _their_node_id: &PublicKey, _msg: &TxAddInput) {}
fn handle_tx_add_output(&self, _their_node_id: &PublicKey, _msg: &TxAddOutput) {}
[package]
name = "lightning-persister"
-version = "0.0.121"
+version = "0.0.123-beta"
authors = ["Valentine Wallace", "Matt Corallo"]
license = "MIT OR Apache-2.0"
repository = "https://github.com/lightningdevkit/rust-lightning"
[dependencies]
bitcoin = "0.30.2"
-lightning = { version = "0.0.121", path = "../lightning" }
+lightning = { version = "0.0.123-beta", path = "../lightning" }
[target.'cfg(windows)'.dependencies]
windows-sys = { version = "0.48.0", default-features = false, features = ["Win32_Storage_FileSystem", "Win32_Foundation"] }
criterion = { version = "0.4", optional = true, default-features = false }
[dev-dependencies]
-lightning = { version = "0.0.121", path = "../lightning", features = ["_test_utils"] }
+lightning = { version = "0.0.123-beta", path = "../lightning", features = ["_test_utils"] }
bitcoin = { version = "0.30.2", default-features = false }
use lightning::ln::functional_test_utils::*;
use lightning::util::test_utils;
use lightning::util::persist::read_channel_monitors;
- use std::fs;
use std::str::FromStr;
impl Drop for FilesystemStore {
check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 100000);
let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
let update_map = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap();
- let update_id = update_map.get(&added_monitors[0].0.to_channel_id()).unwrap();
+ let update_id = update_map.get(&added_monitors[0].1.channel_id()).unwrap();
// Set the store's directory to read-only, which should result in
// returning an unrecoverable failure when we then attempt to persist a
check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 100000);
let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
let update_map = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap();
- let update_id = update_map.get(&added_monitors[0].0.to_channel_id()).unwrap();
+ let update_id = update_map.get(&added_monitors[0].1.channel_id()).unwrap();
// Create the store with an invalid directory name and test that the
// channel fails to open because the directories fail to be created. There
[package]
name = "lightning-rapid-gossip-sync"
-version = "0.0.121"
+version = "0.0.123-beta"
authors = ["Arik Sosman <git@arik.io>"]
license = "MIT OR Apache-2.0"
repository = "https://github.com/lightningdevkit/rust-lightning"
std = ["lightning/std"]
[dependencies]
-lightning = { version = "0.0.121", path = "../lightning", default-features = false }
+lightning = { version = "0.0.123-beta", path = "../lightning", default-features = false }
bitcoin = { version = "0.30.2", default-features = false }
[target.'cfg(ldk_bench)'.dependencies]
criterion = { version = "0.4", optional = true, default-features = false }
[dev-dependencies]
-lightning = { version = "0.0.121", path = "../lightning", features = ["_test_utils"] }
+lightning = { version = "0.0.123-beta", path = "../lightning", features = ["_test_utils"] }
+++ /dev/null
-use core::fmt::Debug;
-use core::fmt::Formatter;
-use lightning::ln::msgs::{DecodeError, LightningError};
-
-/// All-encompassing standard error type that processing can return
-pub enum GraphSyncError {
- /// Error trying to read the update data, typically due to an erroneous data length indication
- /// that is greater than the actual amount of data provided
- DecodeError(DecodeError),
- /// Error applying the patch to the network graph, usually the result of updates that are too
- /// old or missing prerequisite data to the application of updates out of order
- LightningError(LightningError),
-}
-
-impl From<lightning::io::Error> for GraphSyncError {
- fn from(error: lightning::io::Error) -> Self {
- Self::DecodeError(DecodeError::Io(error.kind()))
- }
-}
-
-impl From<DecodeError> for GraphSyncError {
- fn from(error: DecodeError) -> Self {
- Self::DecodeError(error)
- }
-}
-
-impl From<LightningError> for GraphSyncError {
- fn from(error: LightningError) -> Self {
- Self::LightningError(error)
- }
-}
-
-impl Debug for GraphSyncError {
- fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
- match self {
- GraphSyncError::DecodeError(e) => f.write_fmt(format_args!("DecodeError: {:?}", e)),
- GraphSyncError::LightningError(e) => f.write_fmt(format_args!("LightningError: {:?}", e))
- }
- }
-}
use core::sync::atomic::{AtomicBool, Ordering};
use lightning::io;
+use lightning::ln::msgs::{DecodeError, LightningError};
use lightning::routing::gossip::NetworkGraph;
use lightning::util::logger::Logger;
-pub use crate::error::GraphSyncError;
-
-/// Error types that these functions can return
-mod error;
-
/// Core functionality of this crate
mod processing;
+/// All-encompassing standard error type that processing can return
+#[derive(Debug)]
+pub enum GraphSyncError {
+ /// Error trying to read the update data, typically due to an erroneous data length indication
+ /// that is greater than the actual amount of data provided
+ DecodeError(DecodeError),
+ /// Error applying the patch to the network graph, usually the result of updates that are too
+ /// old or missing prerequisite data to the application of updates out of order
+ LightningError(LightningError),
+}
+
+impl From<lightning::io::Error> for GraphSyncError {
+ fn from(error: lightning::io::Error) -> Self {
+ Self::DecodeError(DecodeError::Io(error.kind()))
+ }
+}
+
+impl From<DecodeError> for GraphSyncError {
+ fn from(error: DecodeError) -> Self {
+ Self::DecodeError(error)
+ }
+}
+
+impl From<LightningError> for GraphSyncError {
+ fn from(error: LightningError) -> Self {
+ Self::LightningError(error)
+ }
+}
+
/// The main Rapid Gossip Sync object.
///
/// See [crate-level documentation] for usage.
use lightning::ln::msgs::DecodeError;
use lightning::routing::gossip::NetworkGraph;
use lightning::util::test_utils::TestLogger;
- use crate::RapidGossipSync;
+ use crate::{GraphSyncError, RapidGossipSync};
#[test]
fn test_sync_from_file() {
let start = std::time::Instant::now();
let sync_result = rapid_sync
.sync_network_graph_with_file_path("./res/full_graph.lngossip");
- if let Err(crate::error::GraphSyncError::DecodeError(DecodeError::Io(io_error))) = &sync_result {
+ if let Err(GraphSyncError::DecodeError(DecodeError::Io(io_error))) = &sync_result {
let error_string = format!("Input file lightning-rapid-gossip-sync/res/full_graph.lngossip is missing! Download it from https://bitcoin.ninja/ldk-compressed_graph-285cb27df79-2022-07-21.bin\n\n{:?}", io_error);
#[cfg(not(require_route_graph_test))]
{
use lightning::util::ser::{BigSize, Readable};
use lightning::io;
-use crate::error::GraphSyncError;
-use crate::RapidGossipSync;
+use crate::{GraphSyncError, RapidGossipSync};
#[cfg(all(feature = "std", not(test)))]
use std::time::{SystemTime, UNIX_EPOCH};
-#[cfg(not(feature = "std"))]
+#[cfg(all(not(feature = "std"), not(test)))]
use alloc::{vec::Vec, borrow::ToOwned};
/// The purpose of this prefix is to identify the serialization format, should other rapid gossip
use lightning::routing::gossip::NetworkGraph;
use lightning::util::test_utils::TestLogger;
- use crate::error::GraphSyncError;
use crate::processing::STALE_RGS_UPDATE_AGE_LIMIT_SECS;
- use crate::RapidGossipSync;
+ use crate::{GraphSyncError, RapidGossipSync};
const VALID_RGS_BINARY: [u8; 300] = [
76, 68, 75, 1, 111, 226, 140, 10, 182, 241, 179, 114, 193, 166, 162, 70, 174, 99, 247,
[package]
name = "lightning-transaction-sync"
-version = "0.0.121"
+version = "0.0.123-beta"
authors = ["Elias Rohrer"]
license = "MIT OR Apache-2.0"
repository = "https://github.com/lightningdevkit/rust-lightning"
async-interface = []
[dependencies]
-lightning = { version = "0.0.121", path = "../lightning", default-features = false, features = ["std"] }
+lightning = { version = "0.0.123-beta", path = "../lightning", default-features = false, features = ["std"] }
bitcoin = { version = "0.30.2", default-features = false }
bdk-macros = "0.6"
futures = { version = "0.3", optional = true }
electrum-client = { version = "0.18.0", optional = true }
[dev-dependencies]
-lightning = { version = "0.0.121", path = "../lightning", default-features = false, features = ["std", "_test_utils"] }
+lightning = { version = "0.0.123-beta", path = "../lightning", default-features = false, features = ["std", "_test_utils"] }
tokio = { version = "1.35.0", features = ["full"] }
-[target.'cfg(not(no_download))'.dev-dependencies]
+[target.'cfg(all(not(target_os = "windows"), not(no_download)))'.dev-dependencies]
electrsd = { version = "0.26.0", default-features = false, features = ["legacy", "esplora_a33e97e1", "bitcoind_25_0"] }
-[target.'cfg(no_download)'.dev-dependencies]
+[target.'cfg(all(not(target_os = "windows"), no_download))'.dev-dependencies]
electrsd = { version = "0.26.0", default-features = false, features = ["legacy"] }
use lightning::chain::{Confirm, WatchedOutput};
+use lightning::chain::channelmonitor::ANTI_REORG_DELAY;
use bitcoin::{Txid, BlockHash, Transaction, OutPoint};
use bitcoin::block::Header;
// Outputs that were previously processed, but must not be forgotten yet as
// as we still need to monitor any spends on-chain.
pub watched_outputs: HashMap<OutPoint, WatchedOutput>,
+ // Outputs for which we previously saw a spend on-chain but kept around until the spends reach
+ // sufficient depth.
+ pub outputs_spends_pending_threshold_conf: Vec<(Txid, u32, OutPoint, WatchedOutput)>,
// The tip hash observed during our last sync.
pub last_sync_hash: Option<BlockHash>,
// Indicates whether we need to resync, e.g., after encountering an error.
Self {
watched_transactions: HashSet::new(),
watched_outputs: HashMap::new(),
+ outputs_spends_pending_threshold_conf: Vec::new(),
last_sync_hash: None,
pending_sync: false,
}
}
self.watched_transactions.insert(txid);
+
+ // If a previously-confirmed output spend is unconfirmed, re-add the watched output to
+ // the tracking map.
+ self.outputs_spends_pending_threshold_conf.retain(|(conf_txid, _, prev_outpoint, output)| {
+ if txid == *conf_txid {
+ self.watched_outputs.insert(*prev_outpoint, output.clone());
+ false
+ } else {
+ true
+ }
+ })
}
}
self.watched_transactions.remove(&ctx.tx.txid());
for input in &ctx.tx.input {
- self.watched_outputs.remove(&input.previous_output);
+ if let Some(output) = self.watched_outputs.remove(&input.previous_output) {
+ self.outputs_spends_pending_threshold_conf.push((ctx.tx.txid(), ctx.block_height, input.previous_output, output));
+ }
}
}
}
+
+ pub fn prune_output_spends(&mut self, cur_height: u32) {
+ self.outputs_spends_pending_threshold_conf.retain(|(_, conf_height, _, _)| {
+ cur_height < conf_height + ANTI_REORG_DELAY - 1
+ });
+ }
}
#[derive(Debug)]
pub(crate) struct ConfirmedTx {
pub tx: Transaction,
+ pub txid: Txid,
pub block_header: Header,
pub block_height: u32,
pub pos: usize,
for c in &confirmables {
c.best_block_updated(&tip_header, tip_height);
}
+
+ // Prune any sufficiently confirmed output spends
+ sync_state.prune_output_spends(tip_height);
}
match self.get_confirmed_transactions(&sync_state) {
// First, check the confirmation status of registered transactions as well as the
// status of dependent transactions of registered outputs.
- let mut confirmed_txs = Vec::new();
+ let mut confirmed_txs: Vec<ConfirmedTx> = Vec::new();
let mut watched_script_pubkeys = Vec::with_capacity(
sync_state.watched_transactions.len() + sync_state.watched_outputs.len());
let mut watched_txs = Vec::with_capacity(sync_state.watched_transactions.len());
for (i, script_history) in tx_results.iter().enumerate() {
let (txid, tx) = &watched_txs[i];
+ if confirmed_txs.iter().any(|ctx| ctx.txid == **txid) {
+ continue;
+ }
let mut filtered_history = script_history.iter().filter(|h| h.tx_hash == **txid);
if let Some(history) = filtered_history.next()
{
}
let txid = possible_output_spend.tx_hash;
+ if confirmed_txs.iter().any(|ctx| ctx.txid == txid) {
+ continue;
+ }
+
match self.client.transaction_get(&txid) {
Ok(tx) => {
let mut is_spend = false;
}
let confirmed_tx = ConfirmedTx {
tx: tx.clone(),
+ txid,
block_header, block_height: prob_conf_height,
pos,
};
}
}
- match maybe_await!(self.sync_best_block_updated(&confirmables, &tip_hash)) {
+ match maybe_await!(self.sync_best_block_updated(&confirmables, &mut sync_state, &tip_hash)) {
Ok(()) => {}
Err(InternalError::Inconsistency) => {
// Immediately restart syncing when we encounter any inconsistencies.
#[maybe_async]
fn sync_best_block_updated(
- &self, confirmables: &Vec<&(dyn Confirm + Sync + Send)>, tip_hash: &BlockHash,
+ &self, confirmables: &Vec<&(dyn Confirm + Sync + Send)>, sync_state: &mut SyncState, tip_hash: &BlockHash,
) -> Result<(), InternalError> {
// Inform the interface of the new block.
for c in confirmables {
c.best_block_updated(&tip_header, tip_height);
}
+
+ // Prune any sufficiently confirmed output spends
+ sync_state.prune_output_spends(tip_height);
}
} else {
return Err(InternalError::Inconsistency);
// First, check the confirmation status of registered transactions as well as the
// status of dependent transactions of registered outputs.
- let mut confirmed_txs = Vec::new();
+ let mut confirmed_txs: Vec<ConfirmedTx> = Vec::new();
for txid in &sync_state.watched_transactions {
- if let Some(confirmed_tx) = maybe_await!(self.get_confirmed_tx(&txid, None, None))? {
+ if confirmed_txs.iter().any(|ctx| ctx.txid == *txid) {
+ continue;
+ }
+ if let Some(confirmed_tx) = maybe_await!(self.get_confirmed_tx(*txid, None, None))? {
confirmed_txs.push(confirmed_tx);
}
}
{
if let Some(spending_txid) = output_status.txid {
if let Some(spending_tx_status) = output_status.status {
+ if confirmed_txs.iter().any(|ctx| ctx.txid == spending_txid) {
+ if spending_tx_status.confirmed {
+ // Skip inserting duplicate ConfirmedTx entry
+ continue;
+ } else {
+ log_trace!(self.logger, "Inconsistency: Detected previously-confirmed Tx {} as unconfirmed", spending_txid);
+ return Err(InternalError::Inconsistency);
+ }
+ }
+
if let Some(confirmed_tx) = maybe_await!(self
.get_confirmed_tx(
- &spending_txid,
+ spending_txid,
spending_tx_status.block_hash,
spending_tx_status.block_height,
))?
#[maybe_async]
fn get_confirmed_tx(
- &self, txid: &Txid, expected_block_hash: Option<BlockHash>, known_block_height: Option<u32>,
+ &self, txid: Txid, expected_block_hash: Option<BlockHash>, known_block_height: Option<u32>,
) -> Result<Option<ConfirmedTx>, InternalError> {
if let Some(merkle_block) = maybe_await!(self.client.get_merkle_block(&txid))? {
let block_header = merkle_block.header;
let mut matches = Vec::new();
let mut indexes = Vec::new();
let _ = merkle_block.txn.extract_matches(&mut matches, &mut indexes);
- if indexes.len() != 1 || matches.len() != 1 || matches[0] != *txid {
+ if indexes.len() != 1 || matches.len() != 1 || matches[0] != txid {
log_error!(self.logger, "Retrieved Merkle block for txid {} doesn't match expectations. This should not happen. Please verify server integrity.", txid);
return Err(InternalError::Failed);
}
// unwrap() safety: len() > 0 is checked above
let pos = *indexes.first().unwrap() as usize;
if let Some(tx) = maybe_await!(self.client.get_tx(&txid))? {
+ if tx.txid() != txid {
+ log_error!(self.logger, "Retrieved transaction for txid {} doesn't match expectations. This should not happen. Please verify server integrity.", txid);
+ return Err(InternalError::Failed);
+ }
+
if let Some(block_height) = known_block_height {
// We can take a shortcut here if a previous call already gave us the height.
- return Ok(Some(ConfirmedTx { tx, block_header, pos, block_height }));
+ return Ok(Some(ConfirmedTx { tx, txid, block_header, pos, block_height }));
}
let block_status = maybe_await!(self.client.get_block_status(&block_hash))?;
if let Some(block_height) = block_status.height {
- return Ok(Some(ConfirmedTx { tx, block_header, pos, block_height }));
+ return Ok(Some(ConfirmedTx { tx, txid, block_header, pos, block_height }));
} else {
// If any previously-confirmed block suddenly is no longer confirmed, we found
// an inconsistency and should start over.
-#![cfg(any(feature = "esplora-blocking", feature = "esplora-async", feature = "electrum"))]
+#![cfg(all(not(target_os = "windows"), any(feature = "esplora-blocking", feature = "esplora-async", feature = "electrum")))]
#[cfg(any(feature = "esplora-blocking", feature = "esplora-async"))]
use lightning_transaction_sync::EsploraSyncClient;
[package]
name = "lightning"
-version = "0.0.121"
+version = "0.0.123-beta"
authors = ["Matt Corallo"]
license = "MIT OR Apache-2.0"
repository = "https://github.com/lightningdevkit/rust-lightning/"
# Override signing to not include randomness when generating signatures for test vectors.
_test_vectors = []
-no-std = ["hashbrown", "bitcoin/no-std", "core2/alloc", "libm"]
+no-std = ["hashbrown", "possiblyrandom", "bitcoin/no-std", "core2/alloc", "libm"]
std = ["bitcoin/std"]
# Generates low-r bitcoin signatures, which saves 1 byte in 50% of the cases
[dependencies]
bitcoin = { version = "0.30.2", default-features = false, features = ["secp-recovery"] }
-hashbrown = { version = "0.8", optional = true }
+hashbrown = { version = "0.13", optional = true, default-features = false }
+possiblyrandom = { version = "0.1", optional = true, default-features = false }
hex = { package = "hex-conservative", version = "0.1.1", default-features = false }
regex = { version = "1.5.6", optional = true }
backtrace = { version = "0.3", optional = true }
use bitcoin::secp256k1::{self, PublicKey, Secp256k1, SecretKey};
-use crate::blinded_path::{BlindedHop, BlindedPath};
+#[allow(unused_imports)]
+use crate::prelude::*;
+
+use crate::blinded_path::{BlindedHop, BlindedPath, IntroductionNode, NodeIdLookUp};
use crate::blinded_path::utils;
use crate::io;
use crate::io::Cursor;
use crate::ln::onion_utils;
use crate::onion_message::packet::ControlTlvs;
-use crate::prelude::*;
use crate::sign::{NodeSigner, Recipient};
use crate::crypto::streams::ChaChaPolyReadAdapter;
use crate::util::ser::{FixedLengthReader, LengthReadableArgs, Writeable, Writer};
/// TLVs to encode in an intermediate onion message packet's hop data. When provided in a blinded
/// route, they are encoded into [`BlindedHop::encrypted_payload`].
pub(crate) struct ForwardTlvs {
- /// The node id of the next hop in the onion message's path.
- pub(crate) next_node_id: PublicKey,
+ /// The next hop in the onion message's path.
+ pub(crate) next_hop: NextHop,
/// Senders to a blinded path use this value to concatenate the route they find to the
/// introduction node with the blinded path.
pub(crate) next_blinding_override: Option<PublicKey>,
pub(crate) path_id: Option<[u8; 32]>,
}
+/// The next hop to forward the onion message along its path.
+#[derive(Debug)]
+pub enum NextHop {
+ /// The node id of the next hop.
+ NodeId(PublicKey),
+ /// The short channel id leading to the next hop.
+ ShortChannelId(u64),
+}
+
impl Writeable for ForwardTlvs {
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
+ let (next_node_id, short_channel_id) = match self.next_hop {
+ NextHop::NodeId(pubkey) => (Some(pubkey), None),
+ NextHop::ShortChannelId(scid) => (None, Some(scid)),
+ };
// TODO: write padding
encode_tlv_stream!(writer, {
- (4, self.next_node_id, required),
+ (2, short_channel_id, option),
+ (4, next_node_id, option),
(8, self.next_blinding_override, option)
});
Ok(())
) -> Result<Vec<BlindedHop>, secp256k1::Error> {
let blinded_tlvs = unblinded_path.iter()
.skip(1) // The first node's TLVs contains the next node's pubkey
- .map(|pk| {
- ControlTlvs::Forward(ForwardTlvs { next_node_id: *pk, next_blinding_override: None })
- })
+ .map(|pk| ForwardTlvs { next_hop: NextHop::NodeId(*pk), next_blinding_override: None })
+ .map(|tlvs| ControlTlvs::Forward(tlvs))
.chain(core::iter::once(ControlTlvs::Receive(ReceiveTlvs { path_id: None })));
utils::construct_blinded_hops(secp_ctx, unblinded_path.iter(), blinded_tlvs, session_priv)
// Advance the blinded onion message path by one hop, so make the second hop into the new
// introduction node.
-pub(crate) fn advance_path_by_one<NS: Deref, T: secp256k1::Signing + secp256k1::Verification>(
- path: &mut BlindedPath, node_signer: &NS, secp_ctx: &Secp256k1<T>
-) -> Result<(), ()> where NS::Target: NodeSigner {
+pub(crate) fn advance_path_by_one<NS: Deref, NL: Deref, T>(
+ path: &mut BlindedPath, node_signer: &NS, node_id_lookup: &NL, secp_ctx: &Secp256k1<T>
+) -> Result<(), ()>
+where
+ NS::Target: NodeSigner,
+ NL::Target: NodeIdLookUp,
+ T: secp256k1::Signing + secp256k1::Verification,
+{
let control_tlvs_ss = node_signer.ecdh(Recipient::Node, &path.blinding_point, None)?;
let rho = onion_utils::gen_rho_from_shared_secret(&control_tlvs_ss.secret_bytes());
let encrypted_control_tlvs = path.blinded_hops.remove(0).encrypted_payload;
let mut s = Cursor::new(&encrypted_control_tlvs);
let mut reader = FixedLengthReader::new(&mut s, encrypted_control_tlvs.len() as u64);
match ChaChaPolyReadAdapter::read(&mut reader, rho) {
- Ok(ChaChaPolyReadAdapter { readable: ControlTlvs::Forward(ForwardTlvs {
- mut next_node_id, next_blinding_override,
- })}) => {
+ Ok(ChaChaPolyReadAdapter {
+ readable: ControlTlvs::Forward(ForwardTlvs { next_hop, next_blinding_override })
+ }) => {
+ let next_node_id = match next_hop {
+ NextHop::NodeId(pubkey) => pubkey,
+ NextHop::ShortChannelId(scid) => match node_id_lookup.next_node_id(scid) {
+ Some(pubkey) => pubkey,
+ None => return Err(()),
+ },
+ };
let mut new_blinding_point = match next_blinding_override {
Some(blinding_point) => blinding_point,
None => {
}
};
mem::swap(&mut path.blinding_point, &mut new_blinding_point);
- mem::swap(&mut path.introduction_node_id, &mut next_node_id);
+ path.introduction_node = IntroductionNode::NodeId(next_node_id);
Ok(())
},
_ => Err(())
use crate::ln::msgs::DecodeError;
use crate::offers::invoice::BlindedPayInfo;
+use crate::routing::gossip::{NodeId, ReadOnlyNetworkGraph};
use crate::sign::EntropySource;
use crate::util::ser::{Readable, Writeable, Writer};
#[derive(Clone, Debug, Hash, PartialEq, Eq)]
pub struct BlindedPath {
/// To send to a blinded path, the sender first finds a route to the unblinded
- /// `introduction_node_id`, which can unblind its [`encrypted_payload`] to find out the onion
+ /// `introduction_node`, which can unblind its [`encrypted_payload`] to find out the onion
/// message or payment's next hop and forward it along.
///
/// [`encrypted_payload`]: BlindedHop::encrypted_payload
- pub introduction_node_id: PublicKey,
+ pub introduction_node: IntroductionNode,
/// Used by the introduction node to decrypt its [`encrypted_payload`] to forward the onion
/// message or payment.
///
pub blinded_hops: Vec<BlindedHop>,
}
+/// The unblinded node in a [`BlindedPath`].
+#[derive(Clone, Debug, Hash, PartialEq, Eq)]
+pub enum IntroductionNode {
+ /// The node id of the introduction node.
+ NodeId(PublicKey),
+ /// The short channel id of the channel leading to the introduction node. The [`Direction`]
+ /// identifies which side of the channel is the introduction node.
+ DirectedShortChannelId(Direction, u64),
+}
+
+/// The side of a channel that is the [`IntroductionNode`] in a [`BlindedPath`]. [BOLT 7] defines
+/// which node is which in the [`ChannelAnnouncement`] message.
+///
+/// [BOLT 7]: https://github.com/lightning/bolts/blob/master/07-routing-gossip.md#the-channel_announcement-message
+/// [`ChannelAnnouncement`]: crate::ln::msgs::ChannelAnnouncement
+#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)]
+pub enum Direction {
+ /// The lesser node id when compared lexicographically in ascending order.
+ NodeOne,
+ /// The greater node id when compared lexicographically in ascending order.
+ NodeTwo,
+}
+
+/// An interface for looking up the node id of a channel counterparty for the purpose of forwarding
+/// an [`OnionMessage`].
+///
+/// [`OnionMessage`]: crate::ln::msgs::OnionMessage
+pub trait NodeIdLookUp {
+ /// Returns the node id of the forwarding node's channel counterparty with `short_channel_id`.
+ ///
+ /// Here, the forwarding node is referring to the node of the [`OnionMessenger`] parameterized
+ /// by the [`NodeIdLookUp`] and the counterparty to one of that node's peers.
+ ///
+ /// [`OnionMessenger`]: crate::onion_message::messenger::OnionMessenger
+ fn next_node_id(&self, short_channel_id: u64) -> Option<PublicKey>;
+}
+
+/// A [`NodeIdLookUp`] that always returns `None`.
+pub struct EmptyNodeIdLookUp {}
+
+impl NodeIdLookUp for EmptyNodeIdLookUp {
+ fn next_node_id(&self, _short_channel_id: u64) -> Option<PublicKey> {
+ None
+ }
+}
+
/// An encrypted payload and node id corresponding to a hop in a payment or onion message path, to
/// be encoded in the sender's onion packet. These hops cannot be identified by outside observers
/// and thus can be used to hide the identity of the recipient.
if node_pks.is_empty() { return Err(()) }
let blinding_secret_bytes = entropy_source.get_secure_random_bytes();
let blinding_secret = SecretKey::from_slice(&blinding_secret_bytes[..]).expect("RNG is busted");
- let introduction_node_id = node_pks[0];
+ let introduction_node = IntroductionNode::NodeId(node_pks[0]);
Ok(BlindedPath {
- introduction_node_id,
+ introduction_node,
blinding_point: PublicKey::from_secret_key(secp_ctx, &blinding_secret),
blinded_hops: message::blinded_hops(secp_ctx, node_pks, &blinding_secret).map_err(|_| ())?,
})
/// Create a one-hop blinded path for a payment.
pub fn one_hop_for_payment<ES: EntropySource + ?Sized, T: secp256k1::Signing + secp256k1::Verification>(
- payee_node_id: PublicKey, payee_tlvs: payment::ReceiveTlvs, entropy_source: &ES,
- secp_ctx: &Secp256k1<T>
+ payee_node_id: PublicKey, payee_tlvs: payment::ReceiveTlvs, min_final_cltv_expiry_delta: u16,
+ entropy_source: &ES, secp_ctx: &Secp256k1<T>
) -> Result<(BlindedPayInfo, Self), ()> {
// This value is not considered in pathfinding for 1-hop blinded paths, because it's intended to
// be in relation to a specific channel.
let htlc_maximum_msat = u64::max_value();
Self::new_for_payment(
- &[], payee_node_id, payee_tlvs, htlc_maximum_msat, entropy_source, secp_ctx
+ &[], payee_node_id, payee_tlvs, htlc_maximum_msat, min_final_cltv_expiry_delta,
+ entropy_source, secp_ctx
)
}
// TODO: make all payloads the same size with padding + add dummy hops
pub fn new_for_payment<ES: EntropySource + ?Sized, T: secp256k1::Signing + secp256k1::Verification>(
intermediate_nodes: &[payment::ForwardNode], payee_node_id: PublicKey,
- payee_tlvs: payment::ReceiveTlvs, htlc_maximum_msat: u64, entropy_source: &ES,
- secp_ctx: &Secp256k1<T>
+ payee_tlvs: payment::ReceiveTlvs, htlc_maximum_msat: u64, min_final_cltv_expiry_delta: u16,
+ entropy_source: &ES, secp_ctx: &Secp256k1<T>
) -> Result<(BlindedPayInfo, Self), ()> {
+ let introduction_node = IntroductionNode::NodeId(
+ intermediate_nodes.first().map_or(payee_node_id, |n| n.node_id)
+ );
let blinding_secret_bytes = entropy_source.get_secure_random_bytes();
let blinding_secret = SecretKey::from_slice(&blinding_secret_bytes[..]).expect("RNG is busted");
- let blinded_payinfo = payment::compute_payinfo(intermediate_nodes, &payee_tlvs, htlc_maximum_msat)?;
+ let blinded_payinfo = payment::compute_payinfo(
+ intermediate_nodes, &payee_tlvs, htlc_maximum_msat, min_final_cltv_expiry_delta
+ )?;
Ok((blinded_payinfo, BlindedPath {
- introduction_node_id: intermediate_nodes.first().map_or(payee_node_id, |n| n.node_id),
+ introduction_node,
blinding_point: PublicKey::from_secret_key(secp_ctx, &blinding_secret),
blinded_hops: payment::blinded_hops(
secp_ctx, intermediate_nodes, payee_node_id, payee_tlvs, &blinding_secret
).map_err(|_| ())?,
}))
}
+
+ /// Returns the introduction [`NodeId`] of the blinded path, if it is publicly reachable (i.e.,
+ /// it is found in the network graph).
+ pub fn public_introduction_node_id<'a>(
+ &self, network_graph: &'a ReadOnlyNetworkGraph
+ ) -> Option<&'a NodeId> {
+ match &self.introduction_node {
+ IntroductionNode::NodeId(pubkey) => {
+ let node_id = NodeId::from_pubkey(pubkey);
+ network_graph.nodes().get_key_value(&node_id).map(|(key, _)| key)
+ },
+ IntroductionNode::DirectedShortChannelId(direction, scid) => {
+ network_graph
+ .channel(*scid)
+ .map(|c| match direction {
+ Direction::NodeOne => &c.node_one,
+ Direction::NodeTwo => &c.node_two,
+ })
+ },
+ }
+ }
}
impl Writeable for BlindedPath {
fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
- self.introduction_node_id.write(w)?;
+ match &self.introduction_node {
+ IntroductionNode::NodeId(pubkey) => pubkey.write(w)?,
+ IntroductionNode::DirectedShortChannelId(direction, scid) => {
+ match direction {
+ Direction::NodeOne => 0u8.write(w)?,
+ Direction::NodeTwo => 1u8.write(w)?,
+ }
+ scid.write(w)?;
+ },
+ }
+
self.blinding_point.write(w)?;
(self.blinded_hops.len() as u8).write(w)?;
for hop in &self.blinded_hops {
impl Readable for BlindedPath {
fn read<R: io::Read>(r: &mut R) -> Result<Self, DecodeError> {
- let introduction_node_id = Readable::read(r)?;
+ let mut first_byte: u8 = Readable::read(r)?;
+ let introduction_node = match first_byte {
+ 0 => IntroductionNode::DirectedShortChannelId(Direction::NodeOne, Readable::read(r)?),
+ 1 => IntroductionNode::DirectedShortChannelId(Direction::NodeTwo, Readable::read(r)?),
+ 2|3 => {
+ use io::Read;
+ let mut pubkey_read = core::slice::from_mut(&mut first_byte).chain(r.by_ref());
+ IntroductionNode::NodeId(Readable::read(&mut pubkey_read)?)
+ },
+ _ => return Err(DecodeError::InvalidValue),
+ };
let blinding_point = Readable::read(r)?;
let num_hops: u8 = Readable::read(r)?;
if num_hops == 0 { return Err(DecodeError::InvalidValue) }
blinded_hops.push(Readable::read(r)?);
}
Ok(BlindedPath {
- introduction_node_id,
+ introduction_node,
blinding_point,
blinded_hops,
})
encrypted_payload
});
+impl Direction {
+ /// Returns the [`NodeId`] from the inputs corresponding to the direction.
+ pub fn select_node_id<'a>(&self, node_a: &'a NodeId, node_b: &'a NodeId) -> &'a NodeId {
+ match self {
+ Direction::NodeOne => core::cmp::min(node_a, node_b),
+ Direction::NodeTwo => core::cmp::max(node_a, node_b),
+ }
+ }
+
+ /// Returns the [`PublicKey`] from the inputs corresponding to the direction.
+ pub fn select_pubkey<'a>(&self, node_a: &'a PublicKey, node_b: &'a PublicKey) -> &'a PublicKey {
+ let (node_one, node_two) = if NodeId::from_pubkey(node_a) < NodeId::from_pubkey(node_b) {
+ (node_a, node_b)
+ } else {
+ (node_b, node_a)
+ };
+ match self {
+ Direction::NodeOne => node_one,
+ Direction::NodeTwo => node_two,
+ }
+ }
+}
use crate::ln::features::BlindedHopFeatures;
use crate::ln::msgs::DecodeError;
use crate::offers::invoice::BlindedPayInfo;
-use crate::prelude::*;
-use crate::util::ser::{Readable, Writeable, Writer};
+use crate::offers::invoice_request::InvoiceRequestFields;
+use crate::offers::offer::OfferId;
+use crate::util::ser::{HighZeroBytesDroppedBigSize, Readable, Writeable, Writer};
-use core::convert::TryFrom;
+#[allow(unused_imports)]
+use crate::prelude::*;
/// An intermediate node, its outbound channel, and relay parameters.
#[derive(Clone, Debug)]
pub payment_secret: PaymentSecret,
/// Constraints for the receiver of this payment.
pub payment_constraints: PaymentConstraints,
+ /// Context for the receiver of this payment.
+ pub payment_context: PaymentContext,
}
/// Data to construct a [`BlindedHop`] for sending a payment over.
pub htlc_minimum_msat: u64,
}
+/// The context of an inbound payment, which is included in a [`BlindedPath`] via [`ReceiveTlvs`]
+/// and surfaced in [`PaymentPurpose`].
+///
+/// [`BlindedPath`]: crate::blinded_path::BlindedPath
+/// [`PaymentPurpose`]: crate::events::PaymentPurpose
+#[derive(Clone, Debug, Eq, PartialEq)]
+pub enum PaymentContext {
+ /// The payment context was unknown.
+ Unknown(UnknownPaymentContext),
+
+ /// The payment was made for an invoice requested from a BOLT 12 [`Offer`].
+ ///
+ /// [`Offer`]: crate::offers::offer::Offer
+ Bolt12Offer(Bolt12OfferContext),
+
+ /// The payment was made for an invoice sent for a BOLT 12 [`Refund`].
+ ///
+ /// [`Refund`]: crate::offers::refund::Refund
+ Bolt12Refund(Bolt12RefundContext),
+}
+
+// Used when writing PaymentContext in Event::PaymentClaimable to avoid cloning.
+pub(crate) enum PaymentContextRef<'a> {
+ Bolt12Offer(&'a Bolt12OfferContext),
+ Bolt12Refund(&'a Bolt12RefundContext),
+}
+
+/// An unknown payment context.
+#[derive(Clone, Debug, Eq, PartialEq)]
+pub struct UnknownPaymentContext(());
+
+/// The context of a payment made for an invoice requested from a BOLT 12 [`Offer`].
+///
+/// [`Offer`]: crate::offers::offer::Offer
+#[derive(Clone, Debug, Eq, PartialEq)]
+pub struct Bolt12OfferContext {
+ /// The identifier of the [`Offer`].
+ ///
+ /// [`Offer`]: crate::offers::offer::Offer
+ pub offer_id: OfferId,
+
+ /// Fields from an [`InvoiceRequest`] sent for a [`Bolt12Invoice`].
+ ///
+ /// [`InvoiceRequest`]: crate::offers::invoice_request::InvoiceRequest
+ /// [`Bolt12Invoice`]: crate::offers::invoice::Bolt12Invoice
+ pub invoice_request: InvoiceRequestFields,
+}
+
+/// The context of a payment made for an invoice sent for a BOLT 12 [`Refund`].
+///
+/// [`Refund`]: crate::offers::refund::Refund
+#[derive(Clone, Debug, Eq, PartialEq)]
+pub struct Bolt12RefundContext {}
+
+impl PaymentContext {
+ pub(crate) fn unknown() -> Self {
+ PaymentContext::Unknown(UnknownPaymentContext(()))
+ }
+}
+
impl TryFrom<CounterpartyForwardingInfo> for PaymentRelay {
type Error = ();
impl Writeable for ForwardTlvs {
fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
+ let features_opt =
+ if self.features == BlindedHopFeatures::empty() { None }
+ else { Some(&self.features) };
encode_tlv_stream!(w, {
(2, self.short_channel_id, required),
(10, self.payment_relay, required),
(12, self.payment_constraints, required),
- (14, self.features, required)
+ (14, features_opt, option)
});
Ok(())
}
fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
encode_tlv_stream!(w, {
(12, self.payment_constraints, required),
- (65536, self.payment_secret, required)
+ (65536, self.payment_secret, required),
+ (65537, self.payment_context, required)
});
Ok(())
}
(12, payment_constraints, required),
(14, features, option),
(65536, payment_secret, option),
+ (65537, payment_context, (default_value, PaymentContext::unknown())),
});
let _padding: Option<utils::Padding> = _padding;
if let Some(short_channel_id) = scid {
- if payment_secret.is_some() { return Err(DecodeError::InvalidValue) }
+ if payment_secret.is_some() {
+ return Err(DecodeError::InvalidValue)
+ }
Ok(BlindedPaymentTlvs::Forward(ForwardTlvs {
short_channel_id,
payment_relay: payment_relay.ok_or(DecodeError::InvalidValue)?,
payment_constraints: payment_constraints.0.unwrap(),
- features: features.ok_or(DecodeError::InvalidValue)?,
+ features: features.unwrap_or_else(BlindedHopFeatures::empty),
}))
} else {
if payment_relay.is_some() || features.is_some() { return Err(DecodeError::InvalidValue) }
Ok(BlindedPaymentTlvs::Receive(ReceiveTlvs {
payment_secret: payment_secret.ok_or(DecodeError::InvalidValue)?,
payment_constraints: payment_constraints.0.unwrap(),
+ payment_context: payment_context.0.unwrap(),
}))
}
}
}
pub(super) fn compute_payinfo(
- intermediate_nodes: &[ForwardNode], payee_tlvs: &ReceiveTlvs, payee_htlc_maximum_msat: u64
+ intermediate_nodes: &[ForwardNode], payee_tlvs: &ReceiveTlvs, payee_htlc_maximum_msat: u64,
+ min_final_cltv_expiry_delta: u16
) -> Result<BlindedPayInfo, ()> {
let mut curr_base_fee: u64 = 0;
let mut curr_prop_mil: u64 = 0;
- let mut cltv_expiry_delta: u16 = 0;
+ let mut cltv_expiry_delta: u16 = min_final_cltv_expiry_delta;
for tlvs in intermediate_nodes.iter().rev().map(|n| &n.tlvs) {
// In the future, we'll want to take the intersection of all supported features for the
// `BlindedPayInfo`, but there are no features in that context right now.
})
}
-impl_writeable_msg!(PaymentRelay, {
- cltv_expiry_delta,
- fee_proportional_millionths,
- fee_base_msat
-}, {});
+impl Writeable for PaymentRelay {
+ fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
+ self.cltv_expiry_delta.write(w)?;
+ self.fee_proportional_millionths.write(w)?;
+ HighZeroBytesDroppedBigSize(self.fee_base_msat).write(w)
+ }
+}
+impl Readable for PaymentRelay {
+ fn read<R: io::Read>(r: &mut R) -> Result<Self, DecodeError> {
+ let cltv_expiry_delta: u16 = Readable::read(r)?;
+ let fee_proportional_millionths: u32 = Readable::read(r)?;
+ let fee_base_msat: HighZeroBytesDroppedBigSize<u32> = Readable::read(r)?;
+ Ok(Self { cltv_expiry_delta, fee_proportional_millionths, fee_base_msat: fee_base_msat.0 })
+ }
+}
+
+impl Writeable for PaymentConstraints {
+ fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
+ self.max_cltv_expiry.write(w)?;
+ HighZeroBytesDroppedBigSize(self.htlc_minimum_msat).write(w)
+ }
+}
+impl Readable for PaymentConstraints {
+ fn read<R: io::Read>(r: &mut R) -> Result<Self, DecodeError> {
+ let max_cltv_expiry: u32 = Readable::read(r)?;
+ let htlc_minimum_msat: HighZeroBytesDroppedBigSize<u64> = Readable::read(r)?;
+ Ok(Self { max_cltv_expiry, htlc_minimum_msat: htlc_minimum_msat.0 })
+ }
+}
+
+impl_writeable_tlv_based_enum!(PaymentContext,
+ ;
+ (0, Unknown),
+ (1, Bolt12Offer),
+ (2, Bolt12Refund),
+);
+
+impl<'a> Writeable for PaymentContextRef<'a> {
+ fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
+ match self {
+ PaymentContextRef::Bolt12Offer(context) => {
+ 1u8.write(w)?;
+ context.write(w)?;
+ },
+ PaymentContextRef::Bolt12Refund(context) => {
+ 2u8.write(w)?;
+ context.write(w)?;
+ },
+ }
+
+ Ok(())
+ }
+}
+
+impl Writeable for UnknownPaymentContext {
+ fn write<W: Writer>(&self, _w: &mut W) -> Result<(), io::Error> {
+ Ok(())
+ }
+}
+
+impl Readable for UnknownPaymentContext {
+ fn read<R: io::Read>(_r: &mut R) -> Result<Self, DecodeError> {
+ Ok(UnknownPaymentContext(()))
+ }
+}
+
+impl_writeable_tlv_based!(Bolt12OfferContext, {
+ (0, offer_id, required),
+ (2, invoice_request, required),
+});
-impl_writeable_msg!(PaymentConstraints, {
- max_cltv_expiry,
- htlc_minimum_msat
-}, {});
+impl_writeable_tlv_based!(Bolt12RefundContext, {});
#[cfg(test)]
mod tests {
use bitcoin::secp256k1::PublicKey;
- use crate::blinded_path::payment::{ForwardNode, ForwardTlvs, ReceiveTlvs, PaymentConstraints, PaymentRelay};
+ use crate::blinded_path::payment::{ForwardNode, ForwardTlvs, ReceiveTlvs, PaymentConstraints, PaymentContext, PaymentRelay};
use crate::ln::PaymentSecret;
use crate::ln::features::BlindedHopFeatures;
+ use crate::ln::functional_test_utils::TEST_FINAL_CLTV;
#[test]
fn compute_payinfo() {
max_cltv_expiry: 0,
htlc_minimum_msat: 1,
},
+ payment_context: PaymentContext::unknown(),
};
let htlc_maximum_msat = 100_000;
- let blinded_payinfo = super::compute_payinfo(&intermediate_nodes[..], &recv_tlvs, htlc_maximum_msat).unwrap();
+ let blinded_payinfo = super::compute_payinfo(&intermediate_nodes[..], &recv_tlvs, htlc_maximum_msat, 12).unwrap();
assert_eq!(blinded_payinfo.fee_base_msat, 201);
assert_eq!(blinded_payinfo.fee_proportional_millionths, 1001);
- assert_eq!(blinded_payinfo.cltv_expiry_delta, 288);
+ assert_eq!(blinded_payinfo.cltv_expiry_delta, 300);
assert_eq!(blinded_payinfo.htlc_minimum_msat, 900);
assert_eq!(blinded_payinfo.htlc_maximum_msat, htlc_maximum_msat);
}
max_cltv_expiry: 0,
htlc_minimum_msat: 1,
},
+ payment_context: PaymentContext::unknown(),
};
- let blinded_payinfo = super::compute_payinfo(&[], &recv_tlvs, 4242).unwrap();
+ let blinded_payinfo = super::compute_payinfo(&[], &recv_tlvs, 4242, TEST_FINAL_CLTV as u16).unwrap();
assert_eq!(blinded_payinfo.fee_base_msat, 0);
assert_eq!(blinded_payinfo.fee_proportional_millionths, 0);
- assert_eq!(blinded_payinfo.cltv_expiry_delta, 0);
+ assert_eq!(blinded_payinfo.cltv_expiry_delta, TEST_FINAL_CLTV as u16);
assert_eq!(blinded_payinfo.htlc_minimum_msat, 1);
assert_eq!(blinded_payinfo.htlc_maximum_msat, 4242);
}
max_cltv_expiry: 0,
htlc_minimum_msat: 3,
},
+ payment_context: PaymentContext::unknown(),
};
let htlc_maximum_msat = 100_000;
- let blinded_payinfo = super::compute_payinfo(&intermediate_nodes[..], &recv_tlvs, htlc_maximum_msat).unwrap();
+ let blinded_payinfo = super::compute_payinfo(&intermediate_nodes[..], &recv_tlvs, htlc_maximum_msat, TEST_FINAL_CLTV as u16).unwrap();
assert_eq!(blinded_payinfo.htlc_minimum_msat, 2_000);
}
max_cltv_expiry: 0,
htlc_minimum_msat: 1,
},
+ payment_context: PaymentContext::unknown(),
};
let htlc_minimum_msat = 3798;
- assert!(super::compute_payinfo(&intermediate_nodes[..], &recv_tlvs, htlc_minimum_msat - 1).is_err());
+ assert!(super::compute_payinfo(&intermediate_nodes[..], &recv_tlvs, htlc_minimum_msat - 1, TEST_FINAL_CLTV as u16).is_err());
let htlc_maximum_msat = htlc_minimum_msat + 1;
- let blinded_payinfo = super::compute_payinfo(&intermediate_nodes[..], &recv_tlvs, htlc_maximum_msat).unwrap();
+ let blinded_payinfo = super::compute_payinfo(&intermediate_nodes[..], &recv_tlvs, htlc_maximum_msat, TEST_FINAL_CLTV as u16).unwrap();
assert_eq!(blinded_payinfo.htlc_minimum_msat, htlc_minimum_msat);
assert_eq!(blinded_payinfo.htlc_maximum_msat, htlc_maximum_msat);
}
max_cltv_expiry: 0,
htlc_minimum_msat: 1,
},
+ payment_context: PaymentContext::unknown(),
};
- let blinded_payinfo = super::compute_payinfo(&intermediate_nodes[..], &recv_tlvs, 10_000).unwrap();
+ let blinded_payinfo = super::compute_payinfo(&intermediate_nodes[..], &recv_tlvs, 10_000, TEST_FINAL_CLTV as u16).unwrap();
assert_eq!(blinded_payinfo.htlc_maximum_msat, 3997);
}
}
use crate::util::ser::{Readable, Writeable};
use crate::io;
+
+#[allow(unused_imports)]
use crate::prelude::*;
// TODO: DRY with onion_utils::construct_onion_keys_callback
//! disconnections, transaction broadcasting, and feerate information requests.
use core::{cmp, ops::Deref};
-use core::convert::TryInto;
+
+use crate::prelude::*;
use bitcoin::blockdata::transaction::Transaction;
///
/// [`ChannelManager::close_channel_with_feerate_and_script`]: crate::ln::channelmanager::ChannelManager::close_channel_with_feerate_and_script
ChannelCloseMinimum,
+ /// The feerate [`OutputSweeper`] will use on transactions spending
+ /// [`SpendableOutputDescriptor`]s after a channel closure.
+ ///
+ /// Generally spending these outputs is safe as long as they eventually confirm, so a value
+	/// slightly above the mempool minimum should suffice. However, as this value will influence
+ /// how long funds will be unavailable after channel closure, [`FeeEstimator`] implementors
+ /// might want to choose a higher feerate to regain control over funds faster.
+ ///
+ /// [`OutputSweeper`]: crate::util::sweep::OutputSweeper
+ /// [`SpendableOutputDescriptor`]: crate::sign::SpendableOutputDescriptor
+ OutputSpendingFee,
}
/// A trait which should be implemented to provide feerate information on a number of time
use crate::chain::chaininterface::{BroadcasterInterface, FeeEstimator};
use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, Balance, MonitorEvent, TransactionOutputs, WithChannelMonitor, LATENCY_GRACE_PERIOD_BLOCKS};
use crate::chain::transaction::{OutPoint, TransactionData};
+use crate::ln::ChannelId;
use crate::sign::ecdsa::WriteableEcdsaChannelSigner;
use crate::events;
use crate::events::{Event, EventHandler};
use crate::prelude::*;
use crate::sync::{RwLock, RwLockReadGuard, Mutex, MutexGuard};
-use core::iter::FromIterator;
use core::ops::Deref;
use core::sync::atomic::{AtomicUsize, Ordering};
use bitcoin::secp256k1::PublicKey;
///
/// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
/// [`Writeable::write`]: crate::util::ser::Writeable::write
- fn persist_new_channel(&self, channel_id: OutPoint, data: &ChannelMonitor<ChannelSigner>, update_id: MonitorUpdateId) -> ChannelMonitorUpdateStatus;
+ fn persist_new_channel(&self, channel_funding_outpoint: OutPoint, data: &ChannelMonitor<ChannelSigner>, update_id: MonitorUpdateId) -> ChannelMonitorUpdateStatus;
/// Update one channel's data. The provided [`ChannelMonitor`] has already applied the given
/// update.
/// [`ChannelMonitorUpdateStatus`] for requirements when returning errors.
///
/// [`Writeable::write`]: crate::util::ser::Writeable::write
- fn update_persisted_channel(&self, channel_id: OutPoint, update: Option<&ChannelMonitorUpdate>, data: &ChannelMonitor<ChannelSigner>, update_id: MonitorUpdateId) -> ChannelMonitorUpdateStatus;
+ fn update_persisted_channel(&self, channel_funding_outpoint: OutPoint, update: Option<&ChannelMonitorUpdate>, data: &ChannelMonitor<ChannelSigner>, update_id: MonitorUpdateId) -> ChannelMonitorUpdateStatus;
+ /// Prevents the channel monitor from being loaded on startup.
+ ///
+ /// Archiving the data in a backup location (rather than deleting it fully) is useful for
+ /// hedging against data loss in case of unexpected failure.
+ fn archive_persisted_channel(&self, channel_funding_outpoint: OutPoint);
}
struct MonitorHolder<ChannelSigner: WriteableEcdsaChannelSigner> {
persister: P,
/// "User-provided" (ie persistence-completion/-failed) [`MonitorEvent`]s. These came directly
/// from the user and not from a [`ChannelMonitor`].
- pending_monitor_events: Mutex<Vec<(OutPoint, Vec<MonitorEvent>, Option<PublicKey>)>>,
+ pending_monitor_events: Mutex<Vec<(OutPoint, ChannelId, Vec<MonitorEvent>, Option<PublicKey>)>>,
/// The best block height seen, used as a proxy for the passage of time.
highest_chain_height: AtomicUsize,
FN: Fn(&ChannelMonitor<ChannelSigner>, &TransactionData) -> Vec<TransactionOutputs>
{
let err_str = "ChannelMonitor[Update] persistence failed unrecoverably. This indicates we cannot continue normal operation and must shut down.";
- let funding_outpoints: HashSet<OutPoint> = HashSet::from_iter(self.monitors.read().unwrap().keys().cloned());
+ let funding_outpoints = hash_set_from_iter(self.monitors.read().unwrap().keys().cloned());
for funding_outpoint in funding_outpoints.iter() {
let monitor_lock = self.monitors.read().unwrap();
if let Some(monitor_state) = monitor_lock.get(funding_outpoint) {
let mut txn_outputs;
{
txn_outputs = process(monitor, txdata);
+ let chain_sync_update_id = self.sync_persistence_id.get_increment();
let update_id = MonitorUpdateId {
- contents: UpdateOrigin::ChainSync(self.sync_persistence_id.get_increment()),
+ contents: UpdateOrigin::ChainSync(chain_sync_update_id),
};
let mut pending_monitor_updates = monitor_state.pending_monitor_updates.lock().unwrap();
if let Some(height) = best_height {
}
}
- log_trace!(logger, "Syncing Channel Monitor for channel {}", log_funding_info!(monitor));
+ log_trace!(logger, "Syncing Channel Monitor for channel {} for block-data update_id {}",
+ log_funding_info!(monitor),
+ chain_sync_update_id
+ );
match self.persister.update_persisted_channel(*funding_outpoint, None, monitor, update_id) {
ChannelMonitorUpdateStatus::Completed =>
- log_trace!(logger, "Finished syncing Channel Monitor for channel {}", log_funding_info!(monitor)),
+ log_trace!(logger, "Finished syncing Channel Monitor for channel {} for block-data update_id {}",
+ log_funding_info!(monitor),
+ chain_sync_update_id
+ ),
ChannelMonitorUpdateStatus::InProgress => {
log_debug!(logger, "Channel Monitor sync for channel {} in progress, holding events until completion!", log_funding_info!(monitor));
pending_monitor_updates.push(update_id);
/// transactions relevant to the watched channels.
pub fn new(chain_source: Option<C>, broadcaster: T, logger: L, feeest: F, persister: P) -> Self {
Self {
- monitors: RwLock::new(HashMap::new()),
+ monitors: RwLock::new(new_hash_map()),
sync_persistence_id: AtomicCounter::new(),
chain_source,
broadcaster,
}
}
- /// Lists the funding outpoint of each [`ChannelMonitor`] being monitored.
+ /// Lists the funding outpoint and channel ID of each [`ChannelMonitor`] being monitored.
///
/// Note that [`ChannelMonitor`]s are not removed when a channel is closed as they are always
/// monitoring for on-chain state resolutions.
- pub fn list_monitors(&self) -> Vec<OutPoint> {
- self.monitors.read().unwrap().keys().map(|outpoint| *outpoint).collect()
+ pub fn list_monitors(&self) -> Vec<(OutPoint, ChannelId)> {
+ self.monitors.read().unwrap().iter().map(|(outpoint, monitor_holder)| {
+ let channel_id = monitor_holder.monitor.channel_id();
+ (*outpoint, channel_id)
+ }).collect()
}
#[cfg(not(c_bindings))]
/// Lists the pending updates for each [`ChannelMonitor`] (by `OutPoint` being monitored).
pub fn list_pending_monitor_updates(&self) -> HashMap<OutPoint, Vec<MonitorUpdateId>> {
- self.monitors.read().unwrap().iter().map(|(outpoint, holder)| {
+ hash_map_from_iter(self.monitors.read().unwrap().iter().map(|(outpoint, holder)| {
(*outpoint, holder.pending_monitor_updates.lock().unwrap().clone())
- }).collect()
+ }))
}
#[cfg(c_bindings)]
pending_monitor_updates.retain(|update_id| *update_id != completed_update_id);
match completed_update_id {
- MonitorUpdateId { contents: UpdateOrigin::OffChain(_) } => {
+ MonitorUpdateId { contents: UpdateOrigin::OffChain(completed_update_id) } => {
// Note that we only check for `UpdateOrigin::OffChain` failures here - if
// we're being told that a `UpdateOrigin::OffChain` monitor update completed,
// we only care about ensuring we don't tell the `ChannelManager` to restore
// `MonitorEvent`s from the monitor back to the `ChannelManager` until they
// complete.
let monitor_is_pending_updates = monitor_data.has_pending_offchain_updates(&pending_monitor_updates);
+ log_debug!(self.logger, "Completed off-chain monitor update {} for channel with funding outpoint {:?}, {}",
+ completed_update_id,
+ funding_txo,
+ if monitor_is_pending_updates {
+ "still have pending off-chain updates"
+ } else {
+ "all off-chain updates complete, returning a MonitorEvent"
+ });
if monitor_is_pending_updates {
// If there are still monitor updates pending, we cannot yet construct a
// Completed event.
return Ok(());
}
- self.pending_monitor_events.lock().unwrap().push((funding_txo, vec![MonitorEvent::Completed {
- funding_txo,
+ let channel_id = monitor_data.monitor.channel_id();
+ self.pending_monitor_events.lock().unwrap().push((funding_txo, channel_id, vec![MonitorEvent::Completed {
+ funding_txo, channel_id,
monitor_update_id: monitor_data.monitor.get_latest_update_id(),
}], monitor_data.monitor.get_counterparty_node_id()));
},
- MonitorUpdateId { contents: UpdateOrigin::ChainSync(_) } => {
- if !monitor_data.has_pending_chainsync_updates(&pending_monitor_updates) {
+ MonitorUpdateId { contents: UpdateOrigin::ChainSync(completed_update_id) } => {
+ let monitor_has_pending_updates =
+ monitor_data.has_pending_chainsync_updates(&pending_monitor_updates);
+ log_debug!(self.logger, "Completed chain sync monitor update {} for channel with funding outpoint {:?}, {}",
+ completed_update_id,
+ funding_txo,
+ if monitor_has_pending_updates {
+ "still have pending chain sync updates"
+ } else {
+ "all chain sync updates complete, releasing pending MonitorEvents"
+ });
+ if !monitor_has_pending_updates {
monitor_data.last_chain_persist_height.store(self.highest_chain_height.load(Ordering::Acquire), Ordering::Release);
// The next time release_pending_monitor_events is called, any events for this
// ChannelMonitor will be returned.
#[cfg(any(test, fuzzing))]
pub fn force_channel_monitor_updated(&self, funding_txo: OutPoint, monitor_update_id: u64) {
let monitors = self.monitors.read().unwrap();
- let counterparty_node_id = monitors.get(&funding_txo).and_then(|m| m.monitor.get_counterparty_node_id());
- self.pending_monitor_events.lock().unwrap().push((funding_txo, vec![MonitorEvent::Completed {
+ let (counterparty_node_id, channel_id) = if let Some(m) = monitors.get(&funding_txo) {
+ (m.monitor.get_counterparty_node_id(), m.monitor.channel_id())
+ } else {
+ (None, ChannelId::v1_from_funding_outpoint(funding_txo))
+ };
+ self.pending_monitor_events.lock().unwrap().push((funding_txo, channel_id, vec![MonitorEvent::Completed {
funding_txo,
+ channel_id,
monitor_update_id,
}], counterparty_node_id));
self.event_notifier.notify();
)
}
}
+
+ /// Triggers rebroadcasts of pending claims from force-closed channels after a transaction
+ /// signature generation failure.
+ ///
+ /// `monitor_opt` can be used as a filter to only trigger them for a specific channel monitor.
+ pub fn signer_unblocked(&self, monitor_opt: Option<OutPoint>) {
+ let monitors = self.monitors.read().unwrap();
+ if let Some(funding_txo) = monitor_opt {
+ if let Some(monitor_holder) = monitors.get(&funding_txo) {
+ monitor_holder.monitor.signer_unblocked(
+ &*self.broadcaster, &*self.fee_estimator, &self.logger
+ )
+ }
+ } else {
+ for (_, monitor_holder) in &*monitors {
+ monitor_holder.monitor.signer_unblocked(
+ &*self.broadcaster, &*self.fee_estimator, &self.logger
+ )
+ }
+ }
+ }
+
+ /// Archives fully resolved channel monitors by calling [`Persist::archive_persisted_channel`].
+ ///
+ /// This is useful for pruning fully resolved monitors from the monitor set and primary
+ /// storage so they are not kept in memory and reloaded on restart.
+ ///
+ /// Should be called occasionally (once every handful of blocks or on startup).
+ ///
+ /// Depending on the implementation of [`Persist::archive_persisted_channel`] the monitor
+ /// data could be moved to an archive location or removed entirely.
+ pub fn archive_fully_resolved_channel_monitors(&self) {
+ let mut have_monitors_to_prune = false;
+ for (_, monitor_holder) in self.monitors.read().unwrap().iter() {
+ let logger = WithChannelMonitor::from(&self.logger, &monitor_holder.monitor);
+ if monitor_holder.monitor.is_fully_resolved(&logger) {
+ have_monitors_to_prune = true;
+ }
+ }
+ if have_monitors_to_prune {
+ let mut monitors = self.monitors.write().unwrap();
+ monitors.retain(|funding_txo, monitor_holder| {
+ let logger = WithChannelMonitor::from(&self.logger, &monitor_holder.monitor);
+ if monitor_holder.monitor.is_fully_resolved(&logger) {
+ log_info!(logger,
+ "Archiving fully resolved ChannelMonitor for funding txo {}",
+ funding_txo
+ );
+ self.persister.archive_persisted_channel(*funding_txo);
+ false
+ } else {
+ true
+ }
+ });
+ }
+ }
}
impl<ChannelSigner: WriteableEcdsaChannelSigner, C: Deref, T: Deref, F: Deref, L: Deref, P: Deref>
}
fn update_channel(&self, funding_txo: OutPoint, update: &ChannelMonitorUpdate) -> ChannelMonitorUpdateStatus {
+ // `ChannelMonitorUpdate`'s `channel_id` is `None` prior to 0.0.121 and all channels in those
+ // versions are V1-established. For 0.0.121+ the `channel_id` fields is always `Some`.
+ let channel_id = update.channel_id.unwrap_or(ChannelId::v1_from_funding_outpoint(funding_txo));
// Update the monitor that watches the channel referred to by the given outpoint.
let monitors = self.monitors.read().unwrap();
match monitors.get(&funding_txo) {
None => {
- let logger = WithContext::from(&self.logger, update.counterparty_node_id, Some(funding_txo.to_channel_id()));
+ let logger = WithContext::from(&self.logger, update.counterparty_node_id, Some(channel_id));
log_error!(logger, "Failed to update channel monitor: no such monitor registered");
// We should never ever trigger this from within ChannelManager. Technically a
Some(monitor_state) => {
let monitor = &monitor_state.monitor;
let logger = WithChannelMonitor::from(&self.logger, &monitor);
- log_trace!(logger, "Updating ChannelMonitor for channel {}", log_funding_info!(monitor));
+ log_trace!(logger, "Updating ChannelMonitor to id {} for channel {}", update.update_id, log_funding_info!(monitor));
let update_res = monitor.update_monitor(update, &self.broadcaster, &self.fee_estimator, &self.logger);
let update_id = MonitorUpdateId::from_monitor_update(update);
match persist_res {
ChannelMonitorUpdateStatus::InProgress => {
pending_monitor_updates.push(update_id);
- log_debug!(logger, "Persistence of ChannelMonitorUpdate for channel {} in progress", log_funding_info!(monitor));
+ log_debug!(logger,
+ "Persistence of ChannelMonitorUpdate id {:?} for channel {} in progress",
+ update_id,
+ log_funding_info!(monitor)
+ );
},
ChannelMonitorUpdateStatus::Completed => {
- log_debug!(logger, "Persistence of ChannelMonitorUpdate for channel {} completed", log_funding_info!(monitor));
+ log_debug!(logger,
+ "Persistence of ChannelMonitorUpdate id {:?} for channel {} completed",
+ update_id,
+ log_funding_info!(monitor)
+ );
},
ChannelMonitorUpdateStatus::UnrecoverableError => {
// Take the monitors lock for writing so that we poison it and any future
}
}
- fn release_pending_monitor_events(&self) -> Vec<(OutPoint, Vec<MonitorEvent>, Option<PublicKey>)> {
+ fn release_pending_monitor_events(&self) -> Vec<(OutPoint, ChannelId, Vec<MonitorEvent>, Option<PublicKey>)> {
let mut pending_monitor_events = self.pending_monitor_events.lock().unwrap().split_off(0);
for monitor_state in self.monitors.read().unwrap().values() {
let logger = WithChannelMonitor::from(&self.logger, &monitor_state.monitor);
let monitor_events = monitor_state.monitor.get_and_clear_pending_monitor_events();
if monitor_events.len() > 0 {
let monitor_outpoint = monitor_state.monitor.get_funding_txo().0;
+ let monitor_channel_id = monitor_state.monitor.channel_id();
let counterparty_node_id = monitor_state.monitor.get_counterparty_node_id();
- pending_monitor_events.push((monitor_outpoint, monitor_events, counterparty_node_id));
+ pending_monitor_events.push((monitor_outpoint, monitor_channel_id, monitor_events, counterparty_node_id));
}
}
}
use crate::chain::chaininterface::{BroadcasterInterface, FeeEstimator, LowerBoundedFeeEstimator};
use crate::chain::transaction::{OutPoint, TransactionData};
use crate::sign::{ChannelDerivationParameters, HTLCDescriptor, SpendableOutputDescriptor, StaticPaymentOutputDescriptor, DelayedPaymentOutputDescriptor, ecdsa::WriteableEcdsaChannelSigner, SignerProvider, EntropySource};
-use crate::chain::onchaintx::{ClaimEvent, OnchainTxHandler};
+use crate::chain::onchaintx::{ClaimEvent, FeerateStrategy, OnchainTxHandler};
use crate::chain::package::{CounterpartyOfferedHTLCOutput, CounterpartyReceivedHTLCOutput, HolderFundingOutput, HolderHTLCOutput, PackageSolvingData, PackageTemplate, RevokedOutput, RevokedHTLCOutput};
use crate::chain::Filter;
use crate::util::logger::{Logger, Record};
use crate::util::ser::{Readable, ReadableArgs, RequiredWrapper, MaybeReadable, UpgradableRequired, Writer, Writeable, U48};
use crate::util::byte_utils;
-use crate::events::{Event, EventHandler};
+use crate::events::{ClosureReason, Event, EventHandler};
use crate::events::bump_transaction::{AnchorDescriptor, BumpTransactionEvent};
+#[allow(unused_imports)]
use crate::prelude::*;
+
use core::{cmp, mem};
use crate::io::{self, Error};
-use core::convert::TryInto;
use core::ops::Deref;
use crate::sync::{Mutex, LockTestExt};
///
/// [`ChannelMonitorUpdateStatus::InProgress`]: super::ChannelMonitorUpdateStatus::InProgress
pub update_id: u64,
+ /// The channel ID associated with these updates.
+ ///
+ /// Will be `None` for `ChannelMonitorUpdate`s constructed on LDK versions prior to 0.0.121 and
+ /// always `Some` otherwise.
+ pub channel_id: Option<ChannelId>,
}
/// The update ID used for a [`ChannelMonitorUpdate`] that is either:
}
write_tlv_fields!(w, {
(1, self.counterparty_node_id, option),
+ (3, self.channel_id, option),
});
Ok(())
}
}
}
let mut counterparty_node_id = None;
+ let mut channel_id = None;
read_tlv_fields!(r, {
(1, counterparty_node_id, option),
+ (3, channel_id, option),
});
- Ok(Self { update_id, counterparty_node_id, updates })
+ Ok(Self { update_id, counterparty_node_id, updates, channel_id })
}
}
/// A monitor event containing an HTLCUpdate.
HTLCEvent(HTLCUpdate),
+ /// Indicates we broadcasted the channel's latest commitment transaction and thus closed the
+ /// channel. Holds information about the channel and why it was closed.
+ HolderForceClosedWithInfo {
+ /// The reason the channel was closed.
+ reason: ClosureReason,
+ /// The funding outpoint of the channel.
+ outpoint: OutPoint,
+ /// The channel ID of the channel.
+ channel_id: ChannelId,
+ },
+
/// Indicates we broadcasted the channel's latest commitment transaction and thus closed the
/// channel.
HolderForceClosed(OutPoint),
Completed {
/// The funding outpoint of the [`ChannelMonitor`] that was updated
funding_txo: OutPoint,
+ /// The channel ID of the channel associated with the [`ChannelMonitor`]
+ channel_id: ChannelId,
/// The Update ID from [`ChannelMonitorUpdate::update_id`] which was applied or
/// [`ChannelMonitor::get_latest_update_id`].
///
(0, Completed) => {
(0, funding_txo, required),
(2, monitor_update_id, required),
+ (4, channel_id, required),
+ },
+ (5, HolderForceClosedWithInfo) => {
+ (0, reason, upgradable_required),
+ (2, outpoint, required),
+ (4, channel_id, required),
},
;
(2, HTLCEvent),
}
fn has_reached_confirmation_threshold(&self, best_block: &BestBlock) -> bool {
- best_block.height() >= self.confirmation_threshold()
+ best_block.height >= self.confirmation_threshold()
}
}
channel_keys_id: [u8; 32],
holder_revocation_basepoint: RevocationBasepoint,
+ channel_id: ChannelId,
funding_info: (OutPoint, ScriptBuf),
current_counterparty_commitment_txid: Option<Txid>,
prev_counterparty_commitment_txid: Option<Txid>,
/// Ordering of tuple data: (their_per_commitment_point, feerate_per_kw, to_broadcaster_sats,
/// to_countersignatory_sats)
initial_counterparty_commitment_info: Option<(PublicKey, u32, u64, u64)>,
+
+ /// The first block height at which we had no remaining claimable balances.
+ balances_empty_height: Option<u32>,
}
/// Transaction outputs to watch for on-chain spends.
writer.write_all(&(self.pending_monitor_events.iter().filter(|ev| match ev {
MonitorEvent::HTLCEvent(_) => true,
MonitorEvent::HolderForceClosed(_) => true,
+ MonitorEvent::HolderForceClosedWithInfo { .. } => true,
_ => false,
}).count() as u64).to_be_bytes())?;
for event in self.pending_monitor_events.iter() {
upd.write(writer)?;
},
MonitorEvent::HolderForceClosed(_) => 1u8.write(writer)?,
+ // `HolderForceClosedWithInfo` replaced `HolderForceClosed` in v0.0.122. To keep
+ // backwards compatibility, we write a `HolderForceClosed` event along with the
+ // `HolderForceClosedWithInfo` event. This is deduplicated in the reader.
+ MonitorEvent::HolderForceClosedWithInfo { .. } => 1u8.write(writer)?,
_ => {}, // Covered in the TLV writes below
}
}
event.write(writer)?;
}
- self.best_block.block_hash().write(writer)?;
- writer.write_all(&self.best_block.height().to_be_bytes())?;
+ self.best_block.block_hash.write(writer)?;
+ writer.write_all(&self.best_block.height.to_be_bytes())?;
writer.write_all(&(self.onchain_events_awaiting_threshold_conf.len() as u64).to_be_bytes())?;
for ref entry in self.onchain_events_awaiting_threshold_conf.iter() {
self.lockdown_from_offchain.write(writer)?;
self.holder_tx_signed.write(writer)?;
+ // If we have a `HolderForceClosedWithInfo` event, we need to write the `HolderForceClosed` for backwards compatibility.
+ let pending_monitor_events = match self.pending_monitor_events.iter().find(|ev| match ev {
+ MonitorEvent::HolderForceClosedWithInfo { .. } => true,
+ _ => false,
+ }) {
+ Some(MonitorEvent::HolderForceClosedWithInfo { outpoint, .. }) => {
+ let mut pending_monitor_events = self.pending_monitor_events.clone();
+ pending_monitor_events.push(MonitorEvent::HolderForceClosed(*outpoint));
+ pending_monitor_events
+ }
+ _ => self.pending_monitor_events.clone(),
+ };
+
write_tlv_fields!(writer, {
(1, self.funding_spend_confirmed, option),
(3, self.htlcs_resolved_on_chain, required_vec),
- (5, self.pending_monitor_events, required_vec),
+ (5, pending_monitor_events, required_vec),
(7, self.funding_spend_seen, required),
(9, self.counterparty_node_id, option),
(11, self.confirmed_commitment_tx_counterparty_output, option),
(13, self.spendable_txids_confirmed, required_vec),
(15, self.counterparty_fulfilled_htlcs, required),
(17, self.initial_counterparty_commitment_info, option),
+ (19, self.channel_id, required),
+ (21, self.balances_empty_height, option),
});
Ok(())
pub(crate) fn from_impl<S: WriteableEcdsaChannelSigner>(logger: &'a L, monitor_impl: &ChannelMonitorImpl<S>) -> Self {
let peer_id = monitor_impl.counterparty_node_id;
- let channel_id = Some(monitor_impl.funding_info.0.to_channel_id());
+ let channel_id = Some(monitor_impl.channel_id());
WithChannelMonitor {
logger, peer_id, channel_id,
}
funding_redeemscript: ScriptBuf, channel_value_satoshis: u64,
commitment_transaction_number_obscure_factor: u64,
initial_holder_commitment_tx: HolderCommitmentTransaction,
- best_block: BestBlock, counterparty_node_id: PublicKey) -> ChannelMonitor<Signer> {
+ best_block: BestBlock, counterparty_node_id: PublicKey, channel_id: ChannelId,
+ ) -> ChannelMonitor<Signer> {
assert!(commitment_transaction_number_obscure_factor <= (1 << 48));
let counterparty_payment_script = chan_utils::get_counterparty_payment_script(
channel_parameters.clone(), initial_holder_commitment_tx, secp_ctx
);
- let mut outputs_to_watch = HashMap::new();
+ let mut outputs_to_watch = new_hash_map();
outputs_to_watch.insert(funding_info.0.txid, vec![(funding_info.0.index as u32, funding_info.1.clone())]);
Self::from_impl(ChannelMonitorImpl {
channel_keys_id,
holder_revocation_basepoint,
+ channel_id,
funding_info,
current_counterparty_commitment_txid: None,
prev_counterparty_commitment_txid: None,
on_holder_tx_csv: counterparty_channel_parameters.selected_contest_delay,
commitment_secrets: CounterpartyCommitmentSecrets::new(),
- counterparty_claimable_outpoints: HashMap::new(),
- counterparty_commitment_txn_on_chain: HashMap::new(),
- counterparty_hash_commitment_number: HashMap::new(),
- counterparty_fulfilled_htlcs: HashMap::new(),
+ counterparty_claimable_outpoints: new_hash_map(),
+ counterparty_commitment_txn_on_chain: new_hash_map(),
+ counterparty_hash_commitment_number: new_hash_map(),
+ counterparty_fulfilled_htlcs: new_hash_map(),
prev_holder_signed_commitment_tx: None,
current_holder_commitment_tx: holder_commitment_tx,
current_counterparty_commitment_number: 1 << 48,
current_holder_commitment_number,
- payment_preimages: HashMap::new(),
+ payment_preimages: new_hash_map(),
pending_monitor_events: Vec::new(),
pending_events: Vec::new(),
is_processing_pending_events: false,
best_block,
counterparty_node_id: Some(counterparty_node_id),
initial_counterparty_commitment_info: None,
+ balances_empty_height: None,
})
}
self.inner.lock().unwrap().get_funding_txo().clone()
}
+ /// Gets the channel_id of the channel this ChannelMonitor is monitoring for.
+ pub fn channel_id(&self) -> ChannelId {
+ self.inner.lock().unwrap().channel_id()
+ }
+
/// Gets a list of txids, with their output scripts (in the order they appear in the
/// transaction), which we must learn about spends of via block_connected().
pub fn get_outputs_to_watch(&self) -> Vec<(Txid, Vec<(u32, ScriptBuf)>)> {
/// Loads the funding txo and outputs to watch into the given `chain::Filter` by repeatedly
/// calling `chain::Filter::register_output` and `chain::Filter::register_tx` until all outputs
/// have been registered.
- pub fn load_outputs_to_watch<F: Deref, L: Deref>(&self, filter: &F, logger: &L)
- where
+ pub fn load_outputs_to_watch<F: Deref, L: Deref>(&self, filter: &F, logger: &L)
+ where
F::Target: chain::Filter, L::Target: Logger,
{
let lock = self.inner.lock().unwrap();
self.inner.lock().unwrap().counterparty_node_id
}
- /// Used by [`ChannelManager`] deserialization to broadcast the latest holder state if its copy
- /// of the channel state was out-of-date.
- ///
- /// You may also use this to broadcast the latest local commitment transaction, either because
+ /// You may use this to broadcast the latest local commitment transaction, either because
/// a monitor update failed or because we've fallen behind (i.e. we've received proof that our
/// counterparty side knows a revocation secret we gave them that they shouldn't know).
///
- /// Broadcasting these transactions in the second case is UNSAFE, as they allow counterparty
+ /// Broadcasting these transactions in this manner is UNSAFE, as they allow counterparty
/// side to punish you. Nevertheless you may want to broadcast them if counterparty doesn't
/// close channel with their commitment transaction after a substantial amount of time. Best
/// may be to contact the other node operator out-of-band to coordinate other options available
/// to you.
- ///
- /// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
- pub fn get_latest_holder_commitment_txn<L: Deref>(&self, logger: &L) -> Vec<Transaction>
- where L::Target: Logger {
+ pub fn broadcast_latest_holder_commitment_txn<B: Deref, F: Deref, L: Deref>(
+ &self, broadcaster: &B, fee_estimator: &F, logger: &L
+ )
+ where
+ B::Target: BroadcasterInterface,
+ F::Target: FeeEstimator,
+ L::Target: Logger
+ {
let mut inner = self.inner.lock().unwrap();
+ let fee_estimator = LowerBoundedFeeEstimator::new(&**fee_estimator);
let logger = WithChannelMonitor::from_impl(logger, &*inner);
- inner.get_latest_holder_commitment_txn(&logger)
+ inner.queue_latest_holder_commitment_txn_for_broadcast(broadcaster, &fee_estimator, &logger);
}
- /// Unsafe test-only version of get_latest_holder_commitment_txn used by our test framework
+ /// Unsafe test-only version of `broadcast_latest_holder_commitment_txn` used by our test framework
/// to bypass HolderCommitmentTransaction state update lockdown after signature and generate
/// revoked commitment transaction.
#[cfg(any(test, feature = "unsafe_revoked_tx_signing"))]
let logger = WithChannelMonitor::from_impl(logger, &*inner);
let current_height = inner.best_block.height;
inner.onchain_tx_handler.rebroadcast_pending_claims(
- current_height, &broadcaster, &fee_estimator, &logger,
+ current_height, FeerateStrategy::HighestOfPreviousOrNew, &broadcaster, &fee_estimator, &logger,
+ );
+ }
+
+ /// Triggers rebroadcasts of pending claims from a force-closed channel after a transaction
+ /// signature generation failure.
+ pub fn signer_unblocked<B: Deref, F: Deref, L: Deref>(
+ &self, broadcaster: B, fee_estimator: F, logger: &L,
+ )
+ where
+ B::Target: BroadcasterInterface,
+ F::Target: FeeEstimator,
+ L::Target: Logger,
+ {
+ let fee_estimator = LowerBoundedFeeEstimator::new(fee_estimator);
+ let mut inner = self.inner.lock().unwrap();
+ let logger = WithChannelMonitor::from_impl(logger, &*inner);
+ let current_height = inner.best_block.height;
+ inner.onchain_tx_handler.rebroadcast_pending_claims(
+ current_height, FeerateStrategy::RetryPrevious, &broadcaster, &fee_estimator, &logger,
);
}
spendable_outputs
}
+ /// Checks if the monitor is fully resolved. Resolved monitor is one that has claimed all of
+ /// its outputs and balances (i.e. [`Self::get_claimable_balances`] returns an empty set).
+ ///
+ /// This function returns true only if [`Self::get_claimable_balances`] has been empty for at least
+ /// 2016 blocks as an additional protection against any bugs resulting in spuriously empty balance sets.
+ pub fn is_fully_resolved<L: Logger>(&self, logger: &L) -> bool {
+ let mut is_all_funds_claimed = self.get_claimable_balances().is_empty();
+ let current_height = self.current_best_block().height;
+ let mut inner = self.inner.lock().unwrap();
+
+ if is_all_funds_claimed {
+ if !inner.funding_spend_seen {
+ debug_assert!(false, "We should see funding spend by the time a monitor clears out");
+ is_all_funds_claimed = false;
+ }
+ }
+
+ match (inner.balances_empty_height, is_all_funds_claimed) {
+ (Some(balances_empty_height), true) => {
+ // Claimed all funds, check if reached the blocks threshold.
+ const BLOCKS_THRESHOLD: u32 = 4032; // ~four weeks
+ return current_height >= balances_empty_height + BLOCKS_THRESHOLD;
+ },
+ (Some(_), false) => {
+ // previously assumed we claimed all funds, but we have new funds to claim.
+ // Should not happen in practice.
+ debug_assert!(false, "Thought we were done claiming funds, but claimable_balances now has entries");
+ log_error!(logger,
+ "WARNING: LDK thought it was done claiming all the available funds in the ChannelMonitor for channel {}, but later decided it had more to claim. This is potentially an important bug in LDK, please report it at https://github.com/lightningdevkit/rust-lightning/issues/new",
+ inner.get_funding_txo().0);
+ inner.balances_empty_height = None;
+ false
+ },
+ (None, true) => {
+ // Claimed all funds but `balances_empty_height` is None. It is set to the
+ // current block height.
+ inner.balances_empty_height = Some(current_height);
+ false
+ },
+ (None, false) => {
+ // Have funds to claim.
+ false
+ },
+ }
+ }
+
#[cfg(test)]
pub fn get_counterparty_payment_script(&self) -> ScriptBuf {
self.inner.lock().unwrap().counterparty_payment_script.clone()
pub fn set_counterparty_payment_script(&self, script: ScriptBuf) {
self.inner.lock().unwrap().counterparty_payment_script = script;
}
+
+ #[cfg(test)]
+ pub fn do_signer_call<F: FnMut(&Signer) -> ()>(&self, mut f: F) {
+ let inner = self.inner.lock().unwrap();
+ f(&inner.onchain_tx_handler.signer);
+ }
}
impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
/// HTLCs which were resolved on-chain (i.e. where the final HTLC resolution was done by an
/// event from this `ChannelMonitor`).
pub(crate) fn get_all_current_outbound_htlcs(&self) -> HashMap<HTLCSource, (HTLCOutputInCommitment, Option<PaymentPreimage>)> {
- let mut res = HashMap::new();
+ let mut res = new_hash_map();
// Just examine the available counterparty commitment transactions. See docs on
// `fail_unbroadcast_htlcs`, below, for justification.
let us = self.inner.lock().unwrap();
return self.get_all_current_outbound_htlcs();
}
- let mut res = HashMap::new();
+ let mut res = new_hash_map();
macro_rules! walk_htlcs {
($holder_commitment: expr, $htlc_iter: expr) => {
for (htlc, source) in $htlc_iter {
// before considering it "no longer pending" - this matches when we
// provide the ChannelManager an HTLC failure event.
Some(commitment_tx_output_idx) == htlc.transaction_output_index &&
- us.best_block.height() >= event.height + ANTI_REORG_DELAY - 1
+ us.best_block.height >= event.height + ANTI_REORG_DELAY - 1
} else if let OnchainEvent::HTLCSpendConfirmation { commitment_tx_output_idx, .. } = event.event {
// If the HTLC was fulfilled with a preimage, we consider the HTLC
// immediately non-pending, matching when we provide ChannelManager
macro_rules! claim_htlcs {
($commitment_number: expr, $txid: expr) => {
let (htlc_claim_reqs, _) = self.get_counterparty_output_claim_info($commitment_number, $txid, None);
- self.onchain_tx_handler.update_claims_view_from_requests(htlc_claim_reqs, self.best_block.height(), self.best_block.height(), broadcaster, fee_estimator, logger);
+ self.onchain_tx_handler.update_claims_view_from_requests(htlc_claim_reqs, self.best_block.height, self.best_block.height, broadcaster, fee_estimator, logger);
}
}
if let Some(txid) = self.current_counterparty_commitment_txid {
// Assume that the broadcasted commitment transaction confirmed in the current best
// block. Even if not, its a reasonable metric for the bump criteria on the HTLC
// transactions.
- let (claim_reqs, _) = self.get_broadcasted_holder_claims(&holder_commitment_tx, self.best_block.height());
- self.onchain_tx_handler.update_claims_view_from_requests(claim_reqs, self.best_block.height(), self.best_block.height(), broadcaster, fee_estimator, logger);
+ let (claim_reqs, _) = self.get_broadcasted_holder_claims(&holder_commitment_tx, self.best_block.height);
+ self.onchain_tx_handler.update_claims_view_from_requests(claim_reqs, self.best_block.height, self.best_block.height, broadcaster, fee_estimator, logger);
}
}
}
- fn generate_claimable_outpoints_and_watch_outputs(&mut self) -> (Vec<PackageTemplate>, Vec<TransactionOutputs>) {
+ fn generate_claimable_outpoints_and_watch_outputs(&mut self, reason: ClosureReason) -> (Vec<PackageTemplate>, Vec<TransactionOutputs>) {
let funding_outp = HolderFundingOutput::build(
self.funding_redeemscript.clone(),
self.channel_value_satoshis,
let commitment_package = PackageTemplate::build_package(
self.funding_info.0.txid.clone(), self.funding_info.0.index as u32,
PackageSolvingData::HolderFundingOutput(funding_outp),
- self.best_block.height(), self.best_block.height()
+ self.best_block.height, self.best_block.height
);
let mut claimable_outpoints = vec![commitment_package];
- self.pending_monitor_events.push(MonitorEvent::HolderForceClosed(self.funding_info.0));
+ let event = MonitorEvent::HolderForceClosedWithInfo {
+ reason,
+ outpoint: self.funding_info.0,
+ channel_id: self.channel_id,
+ };
+ self.pending_monitor_events.push(event);
+
// Although we aren't signing the transaction directly here, the transaction will be signed
// in the claim that is queued to OnchainTxHandler. We set holder_tx_signed here to reject
// new channel updates.
// assuming it gets confirmed in the next block. Sadly, we have code which considers
// "not yet confirmed" things as discardable, so we cannot do that here.
let (mut new_outpoints, _) = self.get_broadcasted_holder_claims(
- &self.current_holder_commitment_tx, self.best_block.height()
+ &self.current_holder_commitment_tx, self.best_block.height
);
let unsigned_commitment_tx = self.onchain_tx_handler.get_unsigned_holder_commitment_tx();
let new_outputs = self.get_broadcasted_holder_watch_outputs(
F::Target: FeeEstimator,
L::Target: Logger,
{
- let (claimable_outpoints, _) = self.generate_claimable_outpoints_and_watch_outputs();
+ let (claimable_outpoints, _) = self.generate_claimable_outpoints_and_watch_outputs(ClosureReason::HolderForceClosed);
self.onchain_tx_handler.update_claims_view_from_requests(
- claimable_outpoints, self.best_block.height(), self.best_block.height(), broadcaster,
+ claimable_outpoints, self.best_block.height, self.best_block.height, broadcaster,
fee_estimator, logger
);
}
self.queue_latest_holder_commitment_txn_for_broadcast(broadcaster, &bounded_fee_estimator, logger);
} else if !self.holder_tx_signed {
log_error!(logger, "WARNING: You have a potentially-unsafe holder commitment transaction available to broadcast");
- log_error!(logger, " in channel monitor for channel {}!", &self.funding_info.0.to_channel_id());
- log_error!(logger, " Read the docs for ChannelMonitor::get_latest_holder_commitment_txn and take manual action!");
+ log_error!(logger, " in channel monitor for channel {}!", &self.channel_id());
+ log_error!(logger, " Read the docs for ChannelMonitor::broadcast_latest_holder_commitment_txn to take manual action!");
} else {
// If we generated a MonitorEvent::HolderForceClosed, the ChannelManager
// will still give us a ChannelForceClosed event with !should_broadcast, but we
&self.funding_info
}
+ pub fn channel_id(&self) -> ChannelId {
+ self.channel_id
+ }
+
fn get_outputs_to_watch(&self) -> &HashMap<Txid, Vec<(u32, ScriptBuf)>> {
// If we've detected a counterparty commitment tx on chain, we must include it in the set
// of outputs to watch for spends of, otherwise we're likely to lose user funds. Because
ClaimEvent::BumpCommitment {
package_target_feerate_sat_per_1000_weight, commitment_tx, anchor_output_idx,
} => {
+ let channel_id = self.channel_id;
+ // unwrap safety: `ClaimEvent`s are only available for Anchor channels,
+ // introduced with v0.0.116. counterparty_node_id is guaranteed to be `Some`
+ // since v0.0.110.
+ let counterparty_node_id = self.counterparty_node_id.unwrap();
let commitment_txid = commitment_tx.txid();
debug_assert_eq!(self.current_holder_commitment_tx.txid, commitment_txid);
let pending_htlcs = self.current_holder_commitment_tx.non_dust_htlcs();
let commitment_tx_fee_satoshis = self.channel_value_satoshis -
commitment_tx.output.iter().fold(0u64, |sum, output| sum + output.value);
ret.push(Event::BumpTransaction(BumpTransactionEvent::ChannelClose {
+ channel_id,
+ counterparty_node_id,
claim_id,
package_target_feerate_sat_per_1000_weight,
commitment_tx,
ClaimEvent::BumpHTLC {
target_feerate_sat_per_1000_weight, htlcs, tx_lock_time,
} => {
+ let channel_id = self.channel_id;
+ // unwrap safety: `ClaimEvent`s are only available for Anchor channels,
+ // introduced with v0.0.116. counterparty_node_id is guaranteed to be `Some`
+ // since v0.0.110.
+ let counterparty_node_id = self.counterparty_node_id.unwrap();
let mut htlc_descriptors = Vec::with_capacity(htlcs.len());
for htlc in htlcs {
htlc_descriptors.push(HTLCDescriptor {
});
}
ret.push(Event::BumpTransaction(BumpTransactionEvent::HTLCResolution {
+ channel_id,
+ counterparty_node_id,
claim_id,
target_feerate_sat_per_1000_weight,
htlc_descriptors,
(htlc, htlc_source.as_ref().map(|htlc_source| htlc_source.as_ref()))
), logger);
} else {
- debug_assert!(false, "We should have per-commitment option for any recognized old commitment txn");
+ // Our fuzzers aren't constrained by pesky things like valid signatures, so can
+ // spend our funding output with a transaction which doesn't match our past
+ // commitment transactions. Thus, we can only debug-assert here when not
+ // fuzzing.
+ debug_assert!(cfg!(fuzzing), "We should have per-commitment option for any recognized old commitment txn");
fail_unbroadcast_htlcs!(self, "revoked counterparty", commitment_txid, tx, height,
block_hash, [].iter().map(|reference| *reference), logger);
}
}
}
- fn get_latest_holder_commitment_txn<L: Deref>(
- &mut self, logger: &WithChannelMonitor<L>,
- ) -> Vec<Transaction> where L::Target: Logger {
- log_debug!(logger, "Getting signed latest holder commitment transaction!");
- self.holder_tx_signed = true;
- let commitment_tx = self.onchain_tx_handler.get_fully_signed_holder_tx(&self.funding_redeemscript);
- let txid = commitment_tx.txid();
- let mut holder_transactions = vec![commitment_tx];
- // When anchor outputs are present, the HTLC transactions are only valid once the commitment
- // transaction confirms.
- if self.onchain_tx_handler.channel_type_features().supports_anchors_zero_fee_htlc_tx() {
- return holder_transactions;
- }
- for htlc in self.current_holder_commitment_tx.htlc_outputs.iter() {
- if let Some(vout) = htlc.0.transaction_output_index {
- let preimage = if !htlc.0.offered {
- if let Some(preimage) = self.payment_preimages.get(&htlc.0.payment_hash) { Some(preimage.clone()) } else {
- // We can't build an HTLC-Success transaction without the preimage
- continue;
- }
- } else if htlc.0.cltv_expiry > self.best_block.height() + 1 {
- // Don't broadcast HTLC-Timeout transactions immediately as they don't meet the
- // current locktime requirements on-chain. We will broadcast them in
- // `block_confirmed` when `should_broadcast_holder_commitment_txn` returns true.
- // Note that we add + 1 as transactions are broadcastable when they can be
- // confirmed in the next block.
- continue;
- } else { None };
- if let Some(htlc_tx) = self.onchain_tx_handler.get_fully_signed_htlc_tx(
- &::bitcoin::OutPoint { txid, vout }, &preimage) {
- holder_transactions.push(htlc_tx);
- }
- }
- }
- // We throw away the generated waiting_first_conf data as we aren't (yet) confirmed and we don't actually know what the caller wants to do.
- // The data will be re-generated and tracked in check_spend_holder_transaction if we get a confirmation.
- holder_transactions
- }
-
#[cfg(any(test,feature = "unsafe_revoked_tx_signing"))]
/// Note that this includes possibly-locktimed-in-the-future transactions!
fn unsafe_get_latest_holder_commitment_txn<L: Deref>(
continue;
}
} else { None };
- if let Some(htlc_tx) = self.onchain_tx_handler.get_fully_signed_htlc_tx(
- &::bitcoin::OutPoint { txid, vout }, &preimage) {
- holder_transactions.push(htlc_tx);
+ if let Some(htlc_tx) = self.onchain_tx_handler.get_maybe_signed_htlc_tx(
+ &::bitcoin::OutPoint { txid, vout }, &preimage
+ ) {
+ if htlc_tx.is_fully_signed() {
+ holder_transactions.push(htlc_tx.0);
+ }
}
}
}
{
let block_hash = header.block_hash();
- if height > self.best_block.height() {
+ if height > self.best_block.height {
self.best_block = BestBlock::new(block_hash, height);
log_trace!(logger, "Connecting new block {} at height {}", block_hash, height);
self.block_confirmed(height, block_hash, vec![], vec![], vec![], &broadcaster, &fee_estimator, logger)
- } else if block_hash != self.best_block.block_hash() {
+ } else if block_hash != self.best_block.block_hash {
self.best_block = BestBlock::new(block_hash, height);
log_trace!(logger, "Best block re-orged, replaced with new block {} at height {}", block_hash, height);
self.onchain_events_awaiting_threshold_conf.retain(|ref entry| entry.height <= height);
if prevout.txid == self.funding_info.0.txid && prevout.vout == self.funding_info.0.index as u32 {
let mut balance_spendable_csv = None;
log_info!(logger, "Channel {} closed by funding output spend in txid {}.",
- &self.funding_info.0.to_channel_id(), txid);
+ &self.channel_id(), txid);
self.funding_spend_seen = true;
let mut commitment_tx_to_counterparty_output = None;
if (tx.input[0].sequence.0 >> 8*3) as u8 == 0x80 && (tx.lock_time.to_consensus_u32() >> 8*3) as u8 == 0x20 {
claimable_outpoints.append(&mut new_outpoints);
if new_outpoints.is_empty() {
if let Some((mut new_outpoints, new_outputs)) = self.check_spend_holder_transaction(&tx, height, &block_hash, &logger) {
+ #[cfg(not(fuzzing))]
debug_assert!(commitment_tx_to_counterparty_output.is_none(),
"A commitment transaction matched as both a counterparty and local commitment tx?");
if !new_outputs.1.is_empty() {
}
}
- if height > self.best_block.height() {
+ if height > self.best_block.height {
self.best_block = BestBlock::new(block_hash, height);
}
L::Target: Logger,
{
log_trace!(logger, "Processing {} matched transactions for block at height {}.", txn_matched.len(), conf_height);
- debug_assert!(self.best_block.height() >= conf_height);
+ debug_assert!(self.best_block.height >= conf_height);
let should_broadcast = self.should_broadcast_holder_commitment_txn(logger);
if should_broadcast {
- let (mut new_outpoints, mut new_outputs) = self.generate_claimable_outpoints_and_watch_outputs();
+ let (mut new_outpoints, mut new_outputs) = self.generate_claimable_outpoints_and_watch_outputs(ClosureReason::HTLCsTimedOut);
claimable_outpoints.append(&mut new_outpoints);
watch_outputs.append(&mut new_outputs);
}
log_debug!(logger, "Descriptor {} has got enough confirmations to be passed upstream", log_spendable!(descriptor));
self.pending_events.push(Event::SpendableOutputs {
outputs: vec![descriptor],
- channel_id: Some(self.funding_info.0.to_channel_id()),
+ channel_id: Some(self.channel_id()),
});
self.spendable_txids_confirmed.push(entry.txid);
},
}
}
- self.onchain_tx_handler.update_claims_view_from_requests(claimable_outpoints, conf_height, self.best_block.height(), broadcaster, fee_estimator, logger);
- self.onchain_tx_handler.update_claims_view_from_matched_txn(&txn_matched, conf_height, conf_hash, self.best_block.height(), broadcaster, fee_estimator, logger);
+ self.onchain_tx_handler.update_claims_view_from_requests(claimable_outpoints, conf_height, self.best_block.height, broadcaster, fee_estimator, logger);
+ self.onchain_tx_handler.update_claims_view_from_matched_txn(&txn_matched, conf_height, conf_hash, self.best_block.height, broadcaster, fee_estimator, logger);
// Determine new outputs to watch by comparing against previously known outputs to watch,
// updating the latter in the process.
/// Filters a block's `txdata` for transactions spending watched outputs or for any child
/// transactions thereof.
fn filter_block<'a>(&self, txdata: &TransactionData<'a>) -> Vec<&'a Transaction> {
- let mut matched_txn = HashSet::new();
+ let mut matched_txn = new_hash_set();
txdata.iter().filter(|&&(_, tx)| {
let mut matches = self.spends_watched_output(tx);
for input in tx.input.iter() {
// to the source, and if we don't fail the channel we will have to ensure that the next
// updates that peer sends us are update_fails, failing the channel if not. It's probably
// easier to just fail the channel as this case should be rare enough anyway.
- let height = self.best_block.height();
+ let height = self.best_block.height;
macro_rules! scan_commitment {
($htlcs: expr, $holder_tx: expr) => {
for ref htlc in $htlcs {
revocation_pubkey: broadcasted_holder_revokable_script.2,
channel_keys_id: self.channel_keys_id,
channel_value_satoshis: self.channel_value_satoshis,
+ channel_transaction_parameters: Some(self.onchain_tx_handler.channel_transaction_parameters.clone()),
}));
}
}
}
let counterparty_claimable_outpoints_len: u64 = Readable::read(reader)?;
- let mut counterparty_claimable_outpoints = HashMap::with_capacity(cmp::min(counterparty_claimable_outpoints_len as usize, MAX_ALLOC_SIZE / 64));
+ let mut counterparty_claimable_outpoints = hash_map_with_capacity(cmp::min(counterparty_claimable_outpoints_len as usize, MAX_ALLOC_SIZE / 64));
for _ in 0..counterparty_claimable_outpoints_len {
let txid: Txid = Readable::read(reader)?;
let htlcs_count: u64 = Readable::read(reader)?;
}
let counterparty_commitment_txn_on_chain_len: u64 = Readable::read(reader)?;
- let mut counterparty_commitment_txn_on_chain = HashMap::with_capacity(cmp::min(counterparty_commitment_txn_on_chain_len as usize, MAX_ALLOC_SIZE / 32));
+ let mut counterparty_commitment_txn_on_chain = hash_map_with_capacity(cmp::min(counterparty_commitment_txn_on_chain_len as usize, MAX_ALLOC_SIZE / 32));
for _ in 0..counterparty_commitment_txn_on_chain_len {
let txid: Txid = Readable::read(reader)?;
let commitment_number = <U48 as Readable>::read(reader)?.0;
}
let counterparty_hash_commitment_number_len: u64 = Readable::read(reader)?;
- let mut counterparty_hash_commitment_number = HashMap::with_capacity(cmp::min(counterparty_hash_commitment_number_len as usize, MAX_ALLOC_SIZE / 32));
+ let mut counterparty_hash_commitment_number = hash_map_with_capacity(cmp::min(counterparty_hash_commitment_number_len as usize, MAX_ALLOC_SIZE / 32));
for _ in 0..counterparty_hash_commitment_number_len {
let payment_hash: PaymentHash = Readable::read(reader)?;
let commitment_number = <U48 as Readable>::read(reader)?.0;
let current_holder_commitment_number = <U48 as Readable>::read(reader)?.0;
let payment_preimages_len: u64 = Readable::read(reader)?;
- let mut payment_preimages = HashMap::with_capacity(cmp::min(payment_preimages_len as usize, MAX_ALLOC_SIZE / 32));
+ let mut payment_preimages = hash_map_with_capacity(cmp::min(payment_preimages_len as usize, MAX_ALLOC_SIZE / 32));
for _ in 0..payment_preimages_len {
let preimage: PaymentPreimage = Readable::read(reader)?;
let hash = PaymentHash(Sha256::hash(&preimage.0[..]).to_byte_array());
}
let outputs_to_watch_len: u64 = Readable::read(reader)?;
- let mut outputs_to_watch = HashMap::with_capacity(cmp::min(outputs_to_watch_len as usize, MAX_ALLOC_SIZE / (mem::size_of::<Txid>() + mem::size_of::<u32>() + mem::size_of::<Vec<ScriptBuf>>())));
+ let mut outputs_to_watch = hash_map_with_capacity(cmp::min(outputs_to_watch_len as usize, MAX_ALLOC_SIZE / (mem::size_of::<Txid>() + mem::size_of::<u32>() + mem::size_of::<Vec<ScriptBuf>>())));
for _ in 0..outputs_to_watch_len {
let txid = Readable::read(reader)?;
let outputs_len: u64 = Readable::read(reader)?;
let mut counterparty_node_id = None;
let mut confirmed_commitment_tx_counterparty_output = None;
let mut spendable_txids_confirmed = Some(Vec::new());
- let mut counterparty_fulfilled_htlcs = Some(HashMap::new());
+ let mut counterparty_fulfilled_htlcs = Some(new_hash_map());
let mut initial_counterparty_commitment_info = None;
+ let mut balances_empty_height = None;
+ let mut channel_id = None;
read_tlv_fields!(reader, {
(1, funding_spend_confirmed, option),
(3, htlcs_resolved_on_chain, optional_vec),
(13, spendable_txids_confirmed, optional_vec),
(15, counterparty_fulfilled_htlcs, option),
(17, initial_counterparty_commitment_info, option),
+ (19, channel_id, option),
+ (21, balances_empty_height, option),
});
+ // `HolderForceClosedWithInfo` replaced `HolderForceClosed` in v0.0.122. If we have both
+ // events, we can remove the `HolderForceClosed` event and just keep the `HolderForceClosedWithInfo`.
+ if let Some(ref mut pending_monitor_events) = pending_monitor_events {
+ if pending_monitor_events.iter().any(|e| matches!(e, MonitorEvent::HolderForceClosed(_))) &&
+ pending_monitor_events.iter().any(|e| matches!(e, MonitorEvent::HolderForceClosedWithInfo { .. }))
+ {
+ pending_monitor_events.retain(|e| !matches!(e, MonitorEvent::HolderForceClosed(_)));
+ }
+ }
+
// Monitors for anchor outputs channels opened in v0.0.116 suffered from a bug in which the
// wrong `counterparty_payment_script` was being tracked. Fix it now on deserialization to
// give them a chance to recognize the spendable output.
chan_utils::get_to_countersignatory_with_anchors_redeemscript(&payment_point).to_v0_p2wsh();
}
- Ok((best_block.block_hash(), ChannelMonitor::from_impl(ChannelMonitorImpl {
+ Ok((best_block.block_hash, ChannelMonitor::from_impl(ChannelMonitorImpl {
latest_update_id,
commitment_transaction_number_obscure_factor,
channel_keys_id,
holder_revocation_basepoint,
+ channel_id: channel_id.unwrap_or(ChannelId::v1_from_funding_outpoint(outpoint)),
funding_info,
current_counterparty_commitment_txid,
prev_counterparty_commitment_txid,
best_block,
counterparty_node_id,
initial_counterparty_commitment_info,
+ balances_empty_height,
})))
}
}
use crate::chain::package::{weight_offered_htlc, weight_received_htlc, weight_revoked_offered_htlc, weight_revoked_received_htlc, WEIGHT_REVOKED_OUTPUT};
use crate::chain::transaction::OutPoint;
use crate::sign::InMemorySigner;
- use crate::ln::{PaymentPreimage, PaymentHash};
+ use crate::ln::{PaymentPreimage, PaymentHash, ChannelId};
use crate::ln::channel_keys::{DelayedPaymentBasepoint, DelayedPaymentKey, HtlcBasepoint, RevocationBasepoint, RevocationKey};
use crate::ln::chan_utils::{self,HTLCOutputInCommitment, ChannelPublicKeys, ChannelTransactionParameters, HolderCommitmentTransaction, CounterpartyChannelTransactionParameters};
use crate::ln::channelmanager::{PaymentSendFailure, PaymentId, RecipientOnionFields};
use crate::sync::{Arc, Mutex};
use crate::io;
use crate::ln::features::ChannelTypeFeatures;
+
+ #[allow(unused_imports)]
use crate::prelude::*;
use std::str::FromStr;
htlc_basepoint: HtlcBasepoint::from(PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[48; 32]).unwrap()))
};
let funding_outpoint = OutPoint { txid: Txid::all_zeros(), index: u16::max_value() };
+ let channel_id = ChannelId::v1_from_funding_outpoint(funding_outpoint);
let channel_parameters = ChannelTransactionParameters {
holder_pubkeys: keys.holder_channel_pubkeys.clone(),
holder_selected_contest_delay: 66,
Some(ShutdownScript::new_p2wpkh_from_pubkey(shutdown_pubkey).into_inner()), 0, &ScriptBuf::new(),
(OutPoint { txid: Txid::from_slice(&[43; 32]).unwrap(), index: 0 }, ScriptBuf::new()),
&channel_parameters, ScriptBuf::new(), 46, 0, HolderCommitmentTransaction::dummy(&mut Vec::new()),
- best_block, dummy_key);
+ best_block, dummy_key, channel_id);
let mut htlcs = preimages_slice_to_htlcs!(preimages[0..10]);
let dummy_commitment_tx = HolderCommitmentTransaction::dummy(&mut htlcs);
htlc_basepoint: HtlcBasepoint::from(PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[48; 32]).unwrap())),
};
let funding_outpoint = OutPoint { txid: Txid::all_zeros(), index: u16::max_value() };
+ let channel_id = ChannelId::v1_from_funding_outpoint(funding_outpoint);
let channel_parameters = ChannelTransactionParameters {
holder_pubkeys: keys.holder_channel_pubkeys.clone(),
holder_selected_contest_delay: 66,
Some(ShutdownScript::new_p2wpkh_from_pubkey(shutdown_pubkey).into_inner()), 0, &ScriptBuf::new(),
(OutPoint { txid: Txid::from_slice(&[43; 32]).unwrap(), index: 0 }, ScriptBuf::new()),
&channel_parameters, ScriptBuf::new(), 46, 0, HolderCommitmentTransaction::dummy(&mut Vec::new()),
- best_block, dummy_key);
+ best_block, dummy_key, channel_id);
- let chan_id = monitor.inner.lock().unwrap().funding_info.0.to_channel_id().clone();
+ let chan_id = monitor.inner.lock().unwrap().channel_id();
let context_logger = WithChannelMonitor::from(&logger, &monitor);
log_error!(context_logger, "This is an error");
log_warn!(context_logger, "This is an error");
use bitcoin::secp256k1::PublicKey;
use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, MonitorEvent};
+use crate::ln::ChannelId;
use crate::sign::ecdsa::WriteableEcdsaChannelSigner;
use crate::chain::transaction::{OutPoint, TransactionData};
+use crate::impl_writeable_tlv_based;
+#[allow(unused_imports)]
use crate::prelude::*;
pub mod chaininterface;
pub(crate) mod package;
/// The best known block as identified by its hash and height.
-#[derive(Clone, Copy, PartialEq, Eq)]
+#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)]
pub struct BestBlock {
- block_hash: BlockHash,
- height: u32,
+ /// The block's hash
+ pub block_hash: BlockHash,
+ /// The height at which the block was confirmed.
+ pub height: u32,
}
impl BestBlock {
pub fn new(block_hash: BlockHash, height: u32) -> Self {
BestBlock { block_hash, height }
}
-
- /// Returns the best block hash.
- pub fn block_hash(&self) -> BlockHash { self.block_hash }
-
- /// Returns the best block height.
- pub fn height(&self) -> u32 { self.height }
}
+impl_writeable_tlv_based!(BestBlock, {
+ (0, block_hash, required),
+ (2, height, required),
+});
+
/// The `Listen` trait is used to notify when blocks have been connected or disconnected from the
/// chain.
///
/// For details on asynchronous [`ChannelMonitor`] updating and returning
/// [`MonitorEvent::Completed`] here, see [`ChannelMonitorUpdateStatus::InProgress`].
- fn release_pending_monitor_events(&self) -> Vec<(OutPoint, Vec<MonitorEvent>, Option<PublicKey>)>;
+ fn release_pending_monitor_events(&self) -> Vec<(OutPoint, ChannelId, Vec<MonitorEvent>, Option<PublicKey>)>;
}
/// The `Filter` trait defines behavior for indicating chain activity of interest pertaining to
use crate::chain::chaininterface::{ConfirmationTarget, FeeEstimator, BroadcasterInterface, LowerBoundedFeeEstimator};
use crate::chain::channelmonitor::{ANTI_REORG_DELAY, CLTV_SHARED_CLAIM_BUFFER};
use crate::chain::package::{PackageSolvingData, PackageTemplate};
+use crate::chain::transaction::MaybeSignedTransaction;
use crate::util::logger::Logger;
use crate::util::ser::{Readable, ReadableArgs, MaybeReadable, UpgradableRequired, Writer, Writeable, VecWriter};
/// control) onchain.
pub(crate) enum OnchainClaim {
/// A finalized transaction pending confirmation spending the output to claim.
- Tx(Transaction),
+ Tx(MaybeSignedTransaction),
/// An event yielded externally to signal additional inputs must be added to a transaction
/// pending confirmation spending the output to claim.
Event(ClaimEvent),
}
+/// Represents the different feerate strategies a pending request can use when generating a claim.
+pub(crate) enum FeerateStrategy {
+ /// We must reuse the most recently used feerate, if any.
+ RetryPrevious,
+ /// We must pick the highest between the most recently used and the current feerate estimate.
+ HighestOfPreviousOrNew,
+ /// We must force a bump of the most recently used feerate, either by using the current feerate
+ /// estimate if it's higher, or manually bumping.
+ ForceBump,
+}
+
/// OnchainTxHandler receives claiming requests, aggregates them if it's sound, broadcasts them,
/// and does RBF bumping if possible.
#[derive(Clone)]
signer.provide_channel_parameters(&channel_parameters);
let pending_claim_requests_len: u64 = Readable::read(reader)?;
- let mut pending_claim_requests = HashMap::with_capacity(cmp::min(pending_claim_requests_len as usize, MAX_ALLOC_SIZE / 128));
+ let mut pending_claim_requests = hash_map_with_capacity(cmp::min(pending_claim_requests_len as usize, MAX_ALLOC_SIZE / 128));
for _ in 0..pending_claim_requests_len {
pending_claim_requests.insert(Readable::read(reader)?, Readable::read(reader)?);
}
let claimable_outpoints_len: u64 = Readable::read(reader)?;
- let mut claimable_outpoints = HashMap::with_capacity(cmp::min(pending_claim_requests_len as usize, MAX_ALLOC_SIZE / 128));
+ let mut claimable_outpoints = hash_map_with_capacity(cmp::min(pending_claim_requests_len as usize, MAX_ALLOC_SIZE / 128));
for _ in 0..claimable_outpoints_len {
let outpoint = Readable::read(reader)?;
let ancestor_claim_txid = Readable::read(reader)?;
prev_holder_commitment: None,
signer,
channel_transaction_parameters: channel_parameters,
- pending_claim_requests: HashMap::new(),
- claimable_outpoints: HashMap::new(),
+ pending_claim_requests: new_hash_map(),
+ claimable_outpoints: new_hash_map(),
locktimed_packages: BTreeMap::new(),
onchain_events_awaiting_threshold_conf: Vec::new(),
pending_claim_events: Vec::new(),
/// invoking this every 30 seconds, or lower if running in an environment with spotty
/// connections, like on mobile.
pub(super) fn rebroadcast_pending_claims<B: Deref, F: Deref, L: Logger>(
- &mut self, current_height: u32, broadcaster: &B, fee_estimator: &LowerBoundedFeeEstimator<F>,
- logger: &L,
+ &mut self, current_height: u32, feerate_strategy: FeerateStrategy, broadcaster: &B,
+ fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L,
)
where
B::Target: BroadcasterInterface,
bump_requests.push((*claim_id, request.clone()));
}
for (claim_id, request) in bump_requests {
- self.generate_claim(current_height, &request, false /* force_feerate_bump */, fee_estimator, logger)
+ self.generate_claim(current_height, &request, &feerate_strategy, fee_estimator, logger)
.map(|(_, new_feerate, claim)| {
let mut bumped_feerate = false;
if let Some(mut_request) = self.pending_claim_requests.get_mut(&claim_id) {
}
match claim {
OnchainClaim::Tx(tx) => {
- let log_start = if bumped_feerate { "Broadcasting RBF-bumped" } else { "Rebroadcasting" };
- log_info!(logger, "{} onchain {}", log_start, log_tx!(tx));
- broadcaster.broadcast_transactions(&[&tx]);
+ if tx.is_fully_signed() {
+ let log_start = if bumped_feerate { "Broadcasting RBF-bumped" } else { "Rebroadcasting" };
+ log_info!(logger, "{} onchain {}", log_start, log_tx!(tx.0));
+ broadcaster.broadcast_transactions(&[&tx.0]);
+ } else {
+ log_info!(logger, "Waiting for signature of unsigned onchain transaction {}", tx.0.txid());
+ }
},
OnchainClaim::Event(event) => {
let log_start = if bumped_feerate { "Yielding fee-bumped" } else { "Replaying" };
/// Panics if there are signing errors, because signing operations in reaction to on-chain
/// events are not expected to fail, and if they do, we may lose funds.
fn generate_claim<F: Deref, L: Logger>(
- &mut self, cur_height: u32, cached_request: &PackageTemplate, force_feerate_bump: bool,
+ &mut self, cur_height: u32, cached_request: &PackageTemplate, feerate_strategy: &FeerateStrategy,
fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L,
) -> Option<(u32, u64, OnchainClaim)>
where F::Target: FeeEstimator,
if cached_request.is_malleable() {
if cached_request.requires_external_funding() {
let target_feerate_sat_per_1000_weight = cached_request.compute_package_feerate(
- fee_estimator, ConfirmationTarget::OnChainSweep, force_feerate_bump
+ fee_estimator, ConfirmationTarget::OnChainSweep, feerate_strategy,
);
if let Some(htlcs) = cached_request.construct_malleable_package_with_external_funding(self) {
return Some((
let predicted_weight = cached_request.package_weight(&self.destination_script);
if let Some((output_value, new_feerate)) = cached_request.compute_package_output(
predicted_weight, self.destination_script.dust_value().to_sat(),
- force_feerate_bump, fee_estimator, logger,
+ feerate_strategy, fee_estimator, logger,
) {
assert!(new_feerate != 0);
- let transaction = cached_request.finalize_malleable_package(
+ let transaction = cached_request.maybe_finalize_malleable_package(
cur_height, self, output_value, self.destination_script.clone(), logger
).unwrap();
- log_trace!(logger, "...with timer {} and feerate {}", new_timer, new_feerate);
- assert!(predicted_weight >= transaction.weight().to_wu());
+ assert!(predicted_weight >= transaction.0.weight().to_wu());
return Some((new_timer, new_feerate, OnchainClaim::Tx(transaction)));
}
} else {
// which require external funding.
let mut inputs = cached_request.inputs();
debug_assert_eq!(inputs.len(), 1);
- let tx = match cached_request.finalize_untractable_package(self, logger) {
+ let tx = match cached_request.maybe_finalize_untractable_package(self, logger) {
Some(tx) => tx,
None => return None,
};
// Commitment inputs with anchors support are the only untractable inputs supported
// thus far that require external funding.
PackageSolvingData::HolderFundingOutput(output) => {
- debug_assert_eq!(tx.txid(), self.holder_commitment.trust().txid(),
+ debug_assert_eq!(tx.0.txid(), self.holder_commitment.trust().txid(),
"Holder commitment transaction mismatch");
let conf_target = ConfirmationTarget::OnChainSweep;
let package_target_feerate_sat_per_1000_weight = cached_request
- .compute_package_feerate(fee_estimator, conf_target, force_feerate_bump);
+ .compute_package_feerate(fee_estimator, conf_target, feerate_strategy);
if let Some(input_amount_sat) = output.funding_amount {
- let fee_sat = input_amount_sat - tx.output.iter().map(|output| output.value).sum::<u64>();
+ let fee_sat = input_amount_sat - tx.0.output.iter().map(|output| output.value).sum::<u64>();
let commitment_tx_feerate_sat_per_1000_weight =
- compute_feerate_sat_per_1000_weight(fee_sat, tx.weight().to_wu());
+ compute_feerate_sat_per_1000_weight(fee_sat, tx.0.weight().to_wu());
if commitment_tx_feerate_sat_per_1000_weight >= package_target_feerate_sat_per_1000_weight {
- log_debug!(logger, "Pre-signed {} already has feerate {} sat/kW above required {} sat/kW",
- log_tx!(tx), commitment_tx_feerate_sat_per_1000_weight,
+ log_debug!(logger, "Pre-signed commitment {} already has feerate {} sat/kW above required {} sat/kW",
+ tx.0.txid(), commitment_tx_feerate_sat_per_1000_weight,
package_target_feerate_sat_per_1000_weight);
return Some((new_timer, 0, OnchainClaim::Tx(tx.clone())));
}
// We'll locate an anchor output we can spend within the commitment transaction.
let funding_pubkey = &self.channel_transaction_parameters.holder_pubkeys.funding_pubkey;
- match chan_utils::get_anchor_output(&tx, funding_pubkey) {
+ match chan_utils::get_anchor_output(&tx.0, funding_pubkey) {
// An anchor output was found, so we should yield a funding event externally.
Some((idx, _)) => {
// TODO: Use a lower confirmation target when both our and the
package_target_feerate_sat_per_1000_weight as u64,
OnchainClaim::Event(ClaimEvent::BumpCommitment {
package_target_feerate_sat_per_1000_weight,
- commitment_tx: tx.clone(),
+ commitment_tx: tx.0.clone(),
anchor_output_idx: idx,
}),
))
if let Some(claim_id) = claim_id {
if let Some(claim) = self.pending_claim_requests.remove(&claim_id) {
for outpoint in claim.outpoints() {
- self.claimable_outpoints.remove(&outpoint);
+ self.claimable_outpoints.remove(outpoint);
}
}
} else {
B::Target: BroadcasterInterface,
F::Target: FeeEstimator,
{
- log_debug!(logger, "Updating claims view at height {} with {} claim requests", cur_height, requests.len());
+ if !requests.is_empty() {
+ log_debug!(logger, "Updating claims view at height {} with {} claim requests", cur_height, requests.len());
+ }
+
let mut preprocessed_requests = Vec::with_capacity(requests.len());
let mut aggregated_request = None;
// Claim everything up to and including `cur_height`
let remaining_locked_packages = self.locktimed_packages.split_off(&(cur_height + 1));
+ if !self.locktimed_packages.is_empty() {
+ log_debug!(logger,
+ "Updating claims view at height {} with {} locked packages available for claim",
+ cur_height,
+ self.locktimed_packages.len());
+ }
for (pop_height, mut entry) in self.locktimed_packages.iter_mut() {
log_trace!(logger, "Restoring delayed claim of package(s) at their timelock at {}.", pop_height);
preprocessed_requests.append(&mut entry);
// height timer expiration (i.e in how many blocks we're going to take action).
for mut req in preprocessed_requests {
if let Some((new_timer, new_feerate, claim)) = self.generate_claim(
- cur_height, &req, true /* force_feerate_bump */, &*fee_estimator, &*logger,
+ cur_height, &req, &FeerateStrategy::ForceBump, &*fee_estimator, &*logger,
) {
req.set_timer(new_timer);
req.set_feerate(new_feerate);
// `OnchainClaim`.
let claim_id = match claim {
OnchainClaim::Tx(tx) => {
- log_info!(logger, "Broadcasting onchain {}", log_tx!(tx));
- broadcaster.broadcast_transactions(&[&tx]);
- ClaimId(tx.txid().to_byte_array())
+ if tx.is_fully_signed() {
+ log_info!(logger, "Broadcasting onchain {}", log_tx!(tx.0));
+ broadcaster.broadcast_transactions(&[&tx.0]);
+ } else {
+ log_info!(logger, "Waiting for signature of unsigned onchain transaction {}", tx.0.txid());
+ }
+ ClaimId(tx.0.txid().to_byte_array())
},
OnchainClaim::Event(claim_event) => {
log_info!(logger, "Yielding onchain event to spend inputs {:?}", req.outpoints());
claim_id
},
};
- debug_assert!(self.pending_claim_requests.get(&claim_id).is_none());
+ // Because fuzzing can cause hash collisions, we can end up with conflicting claim
+ // ids here, so we only assert when not fuzzing.
+ debug_assert!(cfg!(fuzzing) || self.pending_claim_requests.get(&claim_id).is_none());
for k in req.outpoints() {
log_info!(logger, "Registering claiming request for {}:{}", k.txid, k.vout);
self.claimable_outpoints.insert(k.clone(), (claim_id, conf_height));
B::Target: BroadcasterInterface,
F::Target: FeeEstimator,
{
- log_debug!(logger, "Updating claims view at height {} with {} matched transactions in block {}", cur_height, txn_matched.len(), conf_height);
- let mut bump_candidates = HashMap::new();
+ let mut have_logged_intro = false;
+ let mut maybe_log_intro = || {
+ if !have_logged_intro {
+ log_debug!(logger, "Updating claims view at height {} with {} matched transactions in block {}", cur_height, txn_matched.len(), conf_height);
+ have_logged_intro = true;
+ }
+ };
+ let mut bump_candidates = new_hash_map();
+ if !txn_matched.is_empty() { maybe_log_intro(); }
for tx in txn_matched {
 // Scan all inputs to check whether one of the spent outpoints is of interest to us
let mut claimed_outputs_material = Vec::new();
self.onchain_events_awaiting_threshold_conf.drain(..).collect::<Vec<_>>();
for entry in onchain_events_awaiting_threshold_conf {
if entry.has_reached_confirmation_threshold(cur_height) {
+ maybe_log_intro();
match entry.event {
OnchainEvent::Claim { claim_id } => {
// We may remove a whole set of claim outpoints here, as these one may have
}
// Build, bump and rebroadcast tx accordingly
- log_trace!(logger, "Bumping {} candidates", bump_candidates.len());
+ if !bump_candidates.is_empty() {
+ maybe_log_intro();
+ log_trace!(logger, "Bumping {} candidates", bump_candidates.len());
+ }
+
for (claim_id, request) in bump_candidates.iter() {
if let Some((new_timer, new_feerate, bump_claim)) = self.generate_claim(
- cur_height, &request, true /* force_feerate_bump */, &*fee_estimator, &*logger,
+ cur_height, &request, &FeerateStrategy::ForceBump, &*fee_estimator, &*logger,
) {
match bump_claim {
OnchainClaim::Tx(bump_tx) => {
- log_info!(logger, "Broadcasting RBF-bumped onchain {}", log_tx!(bump_tx));
- broadcaster.broadcast_transactions(&[&bump_tx]);
+ if bump_tx.is_fully_signed() {
+ log_info!(logger, "Broadcasting RBF-bumped onchain {}", log_tx!(bump_tx.0));
+ broadcaster.broadcast_transactions(&[&bump_tx.0]);
+ } else {
+ log_info!(logger, "Waiting for signature of RBF-bumped unsigned onchain transaction {}",
+ bump_tx.0.txid());
+ }
},
OnchainClaim::Event(claim_event) => {
log_info!(logger, "Yielding RBF-bumped onchain event to spend inputs {:?}", request.outpoints());
where B::Target: BroadcasterInterface,
F::Target: FeeEstimator,
{
- let mut bump_candidates = HashMap::new();
+ let mut bump_candidates = new_hash_map();
let onchain_events_awaiting_threshold_conf =
self.onchain_events_awaiting_threshold_conf.drain(..).collect::<Vec<_>>();
for entry in onchain_events_awaiting_threshold_conf {
// `height` is the height being disconnected, so our `current_height` is 1 lower.
let current_height = height - 1;
if let Some((new_timer, new_feerate, bump_claim)) = self.generate_claim(
- current_height, &request, true /* force_feerate_bump */, fee_estimator, logger
+ current_height, &request, &FeerateStrategy::ForceBump, fee_estimator, logger
) {
request.set_timer(new_timer);
request.set_feerate(new_feerate);
match bump_claim {
OnchainClaim::Tx(bump_tx) => {
- log_info!(logger, "Broadcasting onchain {}", log_tx!(bump_tx));
- broadcaster.broadcast_transactions(&[&bump_tx]);
+ if bump_tx.is_fully_signed() {
+ log_info!(logger, "Broadcasting onchain {}", log_tx!(bump_tx.0));
+ broadcaster.broadcast_transactions(&[&bump_tx.0]);
+ } else {
+ log_info!(logger, "Waiting for signature of unsigned onchain transaction {}", bump_tx.0.txid());
+ }
},
OnchainClaim::Event(claim_event) => {
log_info!(logger, "Yielding onchain event after reorg to spend inputs {:?}", request.outpoints());
&self.holder_commitment.trust().built_transaction().transaction
}
- //TODO: getting lastest holder transactions should be infallible and result in us "force-closing the channel", but we may
- // have empty holder commitment transaction if a ChannelMonitor is asked to force-close just after OutboundV1Channel::get_funding_created,
- // before providing a initial commitment transaction. For outbound channel, init ChannelMonitor at Channel::funding_signed, there is nothing
- // to monitor before.
- pub(crate) fn get_fully_signed_holder_tx(&mut self, funding_redeemscript: &Script) -> Transaction {
- let sig = self.signer.sign_holder_commitment(&self.holder_commitment, &self.secp_ctx).expect("signing holder commitment");
- self.holder_commitment.add_holder_sig(funding_redeemscript, sig)
+ /// Returns the holder commitment transaction: signed if the signer returned
+ /// a signature, otherwise unsigned (inputs left with empty witnesses) so the
+ /// caller can defer broadcast until it is fully signed.
+ pub(crate) fn get_maybe_signed_holder_tx(&mut self, funding_redeemscript: &Script) -> MaybeSignedTransaction {
+ // On signing failure, fall back to a clone of the unsigned commitment
+ // transaction rather than panicking — the signer may be temporarily
+ // unavailable (TODO confirm against the signer's contract).
+ let tx = self.signer.sign_holder_commitment(&self.holder_commitment, &self.secp_ctx)
+ .map(|sig| self.holder_commitment.add_holder_sig(funding_redeemscript, sig))
+ .unwrap_or_else(|_| self.get_unsigned_holder_commitment_tx().clone());
+ MaybeSignedTransaction(tx)
 }
#[cfg(any(test, feature="unsafe_revoked_tx_signing"))]
self.holder_commitment.add_holder_sig(funding_redeemscript, sig)
}
- pub(crate) fn get_fully_signed_htlc_tx(&mut self, outp: &::bitcoin::OutPoint, preimage: &Option<PaymentPreimage>) -> Option<Transaction> {
+ pub(crate) fn get_maybe_signed_htlc_tx(&mut self, outp: &::bitcoin::OutPoint, preimage: &Option<PaymentPreimage>) -> Option<MaybeSignedTransaction> {
let get_signed_htlc_tx = |holder_commitment: &HolderCommitmentTransaction| {
let trusted_tx = holder_commitment.trust();
if trusted_tx.txid() != outp.txid {
preimage: preimage.clone(),
counterparty_sig: counterparty_htlc_sig.clone(),
};
- let htlc_sig = self.signer.sign_holder_htlc_transaction(&htlc_tx, 0, &htlc_descriptor, &self.secp_ctx).unwrap();
- htlc_tx.input[0].witness = trusted_tx.build_htlc_input_witness(
- htlc_idx, &counterparty_htlc_sig, &htlc_sig, preimage,
- );
- Some(htlc_tx)
+ if let Ok(htlc_sig) = self.signer.sign_holder_htlc_transaction(&htlc_tx, 0, &htlc_descriptor, &self.secp_ctx) {
+ htlc_tx.input[0].witness = trusted_tx.build_htlc_input_witness(
+ htlc_idx, &counterparty_htlc_sig, &htlc_sig, preimage,
+ );
+ }
+ Some(MaybeSignedTransaction(htlc_tx))
};
// Check if the HTLC spends from the current holder commitment first, or the previous.
use crate::ln::channel_keys::{DelayedPaymentBasepoint, HtlcBasepoint};
use crate::ln::msgs::DecodeError;
use crate::chain::chaininterface::{FeeEstimator, ConfirmationTarget, MIN_RELAY_FEE_SAT_PER_1000_WEIGHT, compute_feerate_sat_per_1000_weight, FEERATE_FLOOR_SATS_PER_KW};
+use crate::chain::transaction::MaybeSignedTransaction;
use crate::sign::ecdsa::WriteableEcdsaChannelSigner;
-use crate::chain::onchaintx::{ExternalHTLCClaim, OnchainTxHandler};
+use crate::chain::onchaintx::{FeerateStrategy, ExternalHTLCClaim, OnchainTxHandler};
use crate::util::logger::Logger;
use crate::util::ser::{Readable, Writer, Writeable, RequiredWrapper};
use crate::io;
-use crate::prelude::*;
use core::cmp;
-use core::convert::TryInto;
use core::mem;
use core::ops::Deref;
+#[allow(unused_imports)]
+use crate::prelude::*;
+
use super::chaininterface::LowerBoundedFeeEstimator;
const MAX_ALLOC_SIZE: usize = 64*1024;
}
true
}
- fn get_finalized_tx<Signer: WriteableEcdsaChannelSigner>(&self, outpoint: &BitcoinOutPoint, onchain_handler: &mut OnchainTxHandler<Signer>) -> Option<Transaction> {
+ fn get_maybe_finalized_tx<Signer: WriteableEcdsaChannelSigner>(&self, outpoint: &BitcoinOutPoint, onchain_handler: &mut OnchainTxHandler<Signer>) -> Option<MaybeSignedTransaction> {
match self {
PackageSolvingData::HolderHTLCOutput(ref outp) => {
debug_assert!(!outp.channel_type_features.supports_anchors_zero_fee_htlc_tx());
- return onchain_handler.get_fully_signed_htlc_tx(outpoint, &outp.preimage);
+ onchain_handler.get_maybe_signed_htlc_tx(outpoint, &outp.preimage)
}
PackageSolvingData::HolderFundingOutput(ref outp) => {
- return Some(onchain_handler.get_fully_signed_holder_tx(&outp.funding_redeemscript));
+ Some(onchain_handler.get_maybe_signed_holder_tx(&outp.funding_redeemscript))
}
_ => { panic!("API Error!"); }
}
}
htlcs
}
- pub(crate) fn finalize_malleable_package<L: Logger, Signer: WriteableEcdsaChannelSigner>(
+ pub(crate) fn maybe_finalize_malleable_package<L: Logger, Signer: WriteableEcdsaChannelSigner>(
&self, current_height: u32, onchain_handler: &mut OnchainTxHandler<Signer>, value: u64,
destination_script: ScriptBuf, logger: &L
- ) -> Option<Transaction> {
+ ) -> Option<MaybeSignedTransaction> {
debug_assert!(self.is_malleable());
let mut bumped_tx = Transaction {
version: 2,
}
for (i, (outpoint, out)) in self.inputs.iter().enumerate() {
log_debug!(logger, "Adding claiming input for outpoint {}:{}", outpoint.txid, outpoint.vout);
- if !out.finalize_input(&mut bumped_tx, i, onchain_handler) { return None; }
+ if !out.finalize_input(&mut bumped_tx, i, onchain_handler) { continue; }
}
- log_debug!(logger, "Finalized transaction {} ready to broadcast", bumped_tx.txid());
- Some(bumped_tx)
+ Some(MaybeSignedTransaction(bumped_tx))
}
- pub(crate) fn finalize_untractable_package<L: Logger, Signer: WriteableEcdsaChannelSigner>(
+ pub(crate) fn maybe_finalize_untractable_package<L: Logger, Signer: WriteableEcdsaChannelSigner>(
&self, onchain_handler: &mut OnchainTxHandler<Signer>, logger: &L,
- ) -> Option<Transaction> {
+ ) -> Option<MaybeSignedTransaction> {
debug_assert!(!self.is_malleable());
if let Some((outpoint, outp)) = self.inputs.first() {
- if let Some(final_tx) = outp.get_finalized_tx(outpoint, onchain_handler) {
+ if let Some(final_tx) = outp.get_maybe_finalized_tx(outpoint, onchain_handler) {
log_debug!(logger, "Adding claiming input for outpoint {}:{}", outpoint.txid, outpoint.vout);
- log_debug!(logger, "Finalized transaction {} ready to broadcast", final_tx.txid());
return Some(final_tx);
}
return None;
/// which was used to generate the value. Will not return less than `dust_limit_sats` for the
/// value.
pub(crate) fn compute_package_output<F: Deref, L: Logger>(
- &self, predicted_weight: u64, dust_limit_sats: u64, force_feerate_bump: bool,
+ &self, predicted_weight: u64, dust_limit_sats: u64, feerate_strategy: &FeerateStrategy,
fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L,
) -> Option<(u64, u64)>
where F::Target: FeeEstimator,
// If old feerate is 0, first iteration of this claim, use normal fee calculation
if self.feerate_previous != 0 {
if let Some((new_fee, feerate)) = feerate_bump(
- predicted_weight, input_amounts, self.feerate_previous, force_feerate_bump,
+ predicted_weight, input_amounts, self.feerate_previous, feerate_strategy,
fee_estimator, logger,
) {
return Some((cmp::max(input_amounts as i64 - new_fee as i64, dust_limit_sats as i64) as u64, feerate));
None
}
- /// Computes a feerate based on the given confirmation target. If a previous feerate was used,
- /// the new feerate is below it, and `force_feerate_bump` is set, we'll use a 25% increase of
- /// the previous feerate instead of the new feerate.
+ /// Computes a feerate based on the given confirmation target and feerate strategy.
pub(crate) fn compute_package_feerate<F: Deref>(
&self, fee_estimator: &LowerBoundedFeeEstimator<F>, conf_target: ConfirmationTarget,
- force_feerate_bump: bool,
+ feerate_strategy: &FeerateStrategy,
) -> u32 where F::Target: FeeEstimator {
let feerate_estimate = fee_estimator.bounded_sat_per_1000_weight(conf_target);
if self.feerate_previous != 0 {
- // Use the new fee estimate if it's higher than the one previously used.
- if feerate_estimate as u64 > self.feerate_previous {
- feerate_estimate
- } else if !force_feerate_bump {
- self.feerate_previous.try_into().unwrap_or(u32::max_value())
- } else {
- // Our fee estimate has decreased, but our transaction remains unconfirmed after
- // using our previous fee estimate. This may point to an unreliable fee estimator,
- // so we choose to bump our previous feerate by 25%, making sure we don't use a
- // lower feerate or overpay by a large margin by limiting it to 5x the new fee
- // estimate.
- let previous_feerate = self.feerate_previous.try_into().unwrap_or(u32::max_value());
- let mut new_feerate = previous_feerate.saturating_add(previous_feerate / 4);
- if new_feerate > feerate_estimate * 5 {
- new_feerate = cmp::max(feerate_estimate * 5, previous_feerate);
- }
- new_feerate
+ let previous_feerate = self.feerate_previous.try_into().unwrap_or(u32::max_value());
+ match feerate_strategy {
+ FeerateStrategy::RetryPrevious => previous_feerate,
+ FeerateStrategy::HighestOfPreviousOrNew => cmp::max(previous_feerate, feerate_estimate),
+ FeerateStrategy::ForceBump => if feerate_estimate > previous_feerate {
+ feerate_estimate
+ } else {
+ // Our fee estimate has decreased, but our transaction remains unconfirmed after
+ // using our previous fee estimate. This may point to an unreliable fee estimator,
+ // so we choose to bump our previous feerate by 25%, making sure we don't use a
+ // lower feerate or overpay by a large margin by limiting it to 5x the new fee
+ // estimate.
+ let previous_feerate = self.feerate_previous.try_into().unwrap_or(u32::max_value());
+ let mut new_feerate = previous_feerate.saturating_add(previous_feerate / 4);
+ if new_feerate > feerate_estimate * 5 {
+ new_feerate = cmp::max(feerate_estimate * 5, previous_feerate);
+ }
+ new_feerate
+ },
}
} else {
feerate_estimate
/// Attempt to propose a bumping fee for a transaction from its spent output's values and predicted
/// weight. If feerates proposed by the fee-estimator have been increasing since last fee-bumping
-/// attempt, use them. If `force_feerate_bump` is set, we bump the feerate by 25% of the previous
-/// feerate, or just use the previous feerate otherwise. If a feerate bump did happen, we also
-/// verify that those bumping heuristics respect BIP125 rules 3) and 4) and if required adjust the
-/// new fee to meet the RBF policy requirement.
+/// attempt, use them. If we need to force a feerate bump, we manually bump the feerate by 25% of
+/// the previous feerate. If a feerate bump did happen, we also verify that those bumping heuristics
+/// respect BIP125 rules 3) and 4) and if required adjust the new fee to meet the RBF policy
+/// requirement.
fn feerate_bump<F: Deref, L: Logger>(
- predicted_weight: u64, input_amounts: u64, previous_feerate: u64, force_feerate_bump: bool,
+ predicted_weight: u64, input_amounts: u64, previous_feerate: u64, feerate_strategy: &FeerateStrategy,
fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L,
) -> Option<(u64, u64)>
where
{
// If old feerate inferior to actual one given back by Fee Estimator, use it to compute new fee...
let (new_fee, new_feerate) = if let Some((new_fee, new_feerate)) = compute_fee_from_spent_amounts(input_amounts, predicted_weight, fee_estimator, logger) {
- if new_feerate > previous_feerate {
- (new_fee, new_feerate)
- } else if !force_feerate_bump {
- let previous_fee = previous_feerate * predicted_weight / 1000;
- (previous_fee, previous_feerate)
- } else {
- // ...else just increase the previous feerate by 25% (because that's a nice number)
- let bumped_feerate = previous_feerate + (previous_feerate / 4);
- let bumped_fee = bumped_feerate * predicted_weight / 1000;
- if input_amounts <= bumped_fee {
- log_warn!(logger, "Can't 25% bump new claiming tx, amount {} is too small", input_amounts);
- return None;
- }
- (bumped_fee, bumped_feerate)
+ match feerate_strategy {
+ FeerateStrategy::RetryPrevious => {
+ let previous_fee = previous_feerate * predicted_weight / 1000;
+ (previous_fee, previous_feerate)
+ },
+ FeerateStrategy::HighestOfPreviousOrNew => if new_feerate > previous_feerate {
+ (new_fee, new_feerate)
+ } else {
+ let previous_fee = previous_feerate * predicted_weight / 1000;
+ (previous_fee, previous_feerate)
+ },
+ FeerateStrategy::ForceBump => if new_feerate > previous_feerate {
+ (new_fee, new_feerate)
+ } else {
+ // ...else just increase the previous feerate by 25% (because that's a nice number)
+ let bumped_feerate = previous_feerate + (previous_feerate / 4);
+ let bumped_fee = bumped_feerate * predicted_weight / 1000;
+ if input_amounts <= bumped_fee {
+ log_warn!(logger, "Can't 25% bump new claiming tx, amount {} is too small", input_amounts);
+ return None;
+ }
+ (bumped_fee, bumped_feerate)
+ },
}
} else {
log_warn!(logger, "Can't new-estimation bump new claiming tx, amount {} is too small", input_amounts);
//! Types describing on-chain transactions.
-use crate::ln::ChannelId;
use bitcoin::hash_types::Txid;
-use bitcoin::hashes::Hash;
use bitcoin::blockdata::transaction::OutPoint as BitcoinOutPoint;
use bitcoin::blockdata::transaction::Transaction;
}
impl OutPoint {
- /// Convert an `OutPoint` to a lightning channel id.
- pub fn to_channel_id(&self) -> ChannelId {
- ChannelId::v1_from_funding_txid(self.txid.as_byte_array(), self.index)
- }
-
/// Converts this OutPoint into the OutPoint field as used by rust-bitcoin
///
- /// This is not exported to bindings users as the same type is used universally in the C bindings
+ /// This is not exported to bindings users as the same type is used universally in the C bindings
/// for all outpoints
pub fn into_bitcoin_outpoint(self) -> BitcoinOutPoint {
BitcoinOutPoint {
impl_writeable!(OutPoint, { txid, index });
+/// A transaction which may or may not yet carry witnesses on all of its inputs.
+///
+/// Wraps a [`Transaction`]; an input that has not been signed is represented by
+/// an empty witness, and the wrapper is considered fully signed only once every
+/// input carries a non-empty witness.
+#[derive(Debug, Clone)]
+pub(crate) struct MaybeSignedTransaction(pub Transaction);
+
+impl MaybeSignedTransaction {
+ /// Returns `true` if no input of the wrapped transaction has an empty
+ /// witness, i.e. every input appears to have been signed.
+ pub fn is_fully_signed(&self) -> bool {
+ !self.0.input.iter().any(|input| input.witness.is_empty())
+ }
+}
+
#[cfg(test)]
mod tests {
use crate::chain::transaction::OutPoint;
+ use crate::ln::ChannelId;
use bitcoin::blockdata::transaction::Transaction;
use bitcoin::consensus::encode;
#[test]
fn test_channel_id_calculation() {
let tx: Transaction = encode::deserialize(&<Vec<u8>>::from_hex("020000000001010e0adef48412e4361325ac1c6e36411299ab09d4f083b9d8ddb55fbc06e1b0c00000000000feffffff0220a1070000000000220020f81d95e040bd0a493e38bae27bff52fe2bb58b93b293eb579c01c31b05c5af1dc072cfee54a3000016001434b1d6211af5551905dc2642d05f5b04d25a8fe80247304402207f570e3f0de50546aad25a872e3df059d277e776dda4269fa0d2cc8c2ee6ec9a022054e7fae5ca94d47534c86705857c24ceea3ad51c69dd6051c5850304880fc43a012103cb11a1bacc223d98d91f1946c6752e358a5eb1a1c983b3e6fb15378f453b76bd00000000").unwrap()[..]).unwrap();
- assert_eq!(&OutPoint {
+ assert_eq!(&ChannelId::v1_from_funding_outpoint(OutPoint {
txid: tx.txid(),
index: 0
- }.to_channel_id().0[..], &<Vec<u8>>::from_hex("3e88dd7165faf7be58b3c5bb2c9c452aebef682807ea57080f62e6f6e113c25e").unwrap()[..]);
- assert_eq!(&OutPoint {
+ }).0[..], &<Vec<u8>>::from_hex("3e88dd7165faf7be58b3c5bb2c9c452aebef682807ea57080f62e6f6e113c25e").unwrap()[..]);
+ assert_eq!(&ChannelId::v1_from_funding_outpoint(OutPoint {
txid: tx.txid(),
index: 1
- }.to_channel_id().0[..], &<Vec<u8>>::from_hex("3e88dd7165faf7be58b3c5bb2c9c452aebef682807ea57080f62e6f6e113c25f").unwrap()[..]);
+ }).0[..], &<Vec<u8>>::from_hex("3e88dd7165faf7be58b3c5bb2c9c452aebef682807ea57080f62e6f6e113c25f").unwrap()[..]);
}
}
#[cfg(not(fuzzing))]
mod real_chacha {
use core::cmp;
- use core::convert::TryInto;
#[derive(Clone, Copy, PartialEq, Eq)]
#[allow(non_camel_case_types)]
#[cfg(test)]
mod test {
- use alloc::vec;
- use alloc::vec::{Vec};
- use core::convert::TryInto;
use core::iter::repeat;
+ use crate::prelude::*;
+
use super::ChaCha20;
#[test]
+#[cfg(not(fuzzing))]
use bitcoin::hashes::cmp::fixed_time_eq;
pub(crate) mod chacha20;
// https://github.com/floodyberry/poly1305-donna
use core::cmp::min;
-use core::convert::TryInto;
+
+use crate::prelude::*;
#[derive(Clone, Copy)]
pub struct Poly1305 {
#[cfg(test)]
mod test {
use core::iter::repeat;
- use alloc::vec::Vec;
use super::Poly1305;
let writeable_len = $obj.serialized_length() as u64 + 16;
let write_adapter = ChaChaPolyWriteAdapter::new(rho, &$obj);
let encrypted_writeable_bytes = write_adapter.encode();
- let encrypted_writeable = &encrypted_writeable_bytes[..];
+ let encrypted_writeable = &mut &encrypted_writeable_bytes[..];
// Now deserialize the object back and make sure it matches the original.
let mut rd = FixedLengthReader::new(encrypted_writeable, writeable_len);
use crate::chain::ClaimId;
use crate::io_extras::sink;
use crate::ln::channel::ANCHOR_OUTPUT_VALUE_SATOSHI;
+use crate::ln::ChannelId;
use crate::ln::chan_utils;
use crate::ln::chan_utils::{
ANCHOR_INPUT_WITNESS_WEIGHT, HTLC_SUCCESS_INPUT_ANCHOR_WITNESS_WEIGHT,
use bitcoin::consensus::Encodable;
use bitcoin::psbt::PartiallySignedTransaction;
use bitcoin::secp256k1;
-use bitcoin::secp256k1::Secp256k1;
+use bitcoin::secp256k1::{PublicKey, Secp256k1};
use bitcoin::secp256k1::ecdsa::Signature;
-const EMPTY_SCRIPT_SIG_WEIGHT: u64 = 1 /* empty script_sig */ * WITNESS_SCALE_FACTOR as u64;
+pub(crate) const EMPTY_SCRIPT_SIG_WEIGHT: u64 = 1 /* empty script_sig */ * WITNESS_SCALE_FACTOR as u64;
const BASE_INPUT_SIZE: u64 = 32 /* txid */ + 4 /* vout */ + 4 /* sequence */;
-const BASE_INPUT_WEIGHT: u64 = BASE_INPUT_SIZE * WITNESS_SCALE_FACTOR as u64;
+pub(crate) const BASE_INPUT_WEIGHT: u64 = BASE_INPUT_SIZE * WITNESS_SCALE_FACTOR as u64;
/// A descriptor used to sign for a commitment transaction's anchor output.
#[derive(Clone, Debug, PartialEq, Eq)]
/// [`EcdsaChannelSigner::sign_holder_anchor_input`]: crate::sign::ecdsa::EcdsaChannelSigner::sign_holder_anchor_input
/// [`build_anchor_input_witness`]: crate::ln::chan_utils::build_anchor_input_witness
ChannelClose {
+ /// The `channel_id` of the channel which has been closed.
+ channel_id: ChannelId,
+ /// Counterparty in the closed channel.
+ counterparty_node_id: PublicKey,
/// The unique identifier for the claim of the anchor output in the commitment transaction.
///
/// The identifier must map to the set of external UTXOs assigned to the claim, such that
/// [`EcdsaChannelSigner`]: crate::sign::ecdsa::EcdsaChannelSigner
/// [`EcdsaChannelSigner::sign_holder_htlc_transaction`]: crate::sign::ecdsa::EcdsaChannelSigner::sign_holder_htlc_transaction
HTLCResolution {
+ /// The `channel_id` of the channel which has been closed.
+ channel_id: ChannelId,
+ /// Counterparty in the closed channel.
+ counterparty_node_id: PublicKey,
/// The unique identifier for the claim of the HTLCs in the confirmed commitment
/// transaction.
///
/// Returns a new instance backed by the given [`WalletSource`] that serves as an implementation
/// of [`CoinSelectionSource`].
pub fn new(source: W, logger: L) -> Self {
- Self { source, logger, locked_utxos: Mutex::new(HashMap::new()) }
+ Self { source, logger, locked_utxos: Mutex::new(new_hash_map()) }
}
/// Performs coin selection on the set of UTXOs obtained from
}
}
BumpTransactionEvent::HTLCResolution {
- claim_id, target_feerate_sat_per_1000_weight, htlc_descriptors, tx_lock_time,
+ claim_id, target_feerate_sat_per_1000_weight, htlc_descriptors, tx_lock_time, ..
} => {
log_info!(self.logger, "Handling HTLC bump (claim_id = {}, htlcs_to_claim = {})",
log_bytes!(claim_id.0), log_iter!(htlc_descriptors.iter().map(|d| d.outpoint())));
pub use bump_transaction::BumpTransactionEvent;
+use crate::blinded_path::payment::{Bolt12OfferContext, Bolt12RefundContext, PaymentContext, PaymentContextRef};
use crate::sign::SpendableOutputDescriptor;
use crate::ln::channelmanager::{InterceptId, PaymentId, RecipientOnionFields};
use crate::ln::channel::FUNDING_CONF_DEADLINE_BLOCKS;
use bitcoin::hashes::sha256::Hash as Sha256;
use bitcoin::secp256k1::PublicKey;
use crate::io;
-use crate::prelude::*;
use core::time::Duration;
use core::ops::Deref;
use crate::sync::Arc;
+#[allow(unused_imports)]
+use crate::prelude::*;
+
/// Some information provided on receipt of payment depends on whether the payment received is a
/// spontaneous payment or a "conventional" lightning payment that's paying an invoice.
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum PaymentPurpose {
- /// Information for receiving a payment that we generated an invoice for.
- InvoicePayment {
+ /// A payment for a BOLT 11 invoice.
+ Bolt11InvoicePayment {
/// The preimage to the payment_hash, if the payment hash (and secret) were fetched via
- /// [`ChannelManager::create_inbound_payment`]. If provided, this can be handed directly to
- /// [`ChannelManager::claim_funds`].
+ /// [`ChannelManager::create_inbound_payment`]. When handling [`Event::PaymentClaimable`],
+ /// this can be passed directly to [`ChannelManager::claim_funds`] to claim the payment. No
+ /// action is needed when seen in [`Event::PaymentClaimed`].
///
/// [`ChannelManager::create_inbound_payment`]: crate::ln::channelmanager::ChannelManager::create_inbound_payment
/// [`ChannelManager::claim_funds`]: crate::ln::channelmanager::ChannelManager::claim_funds
/// [`ChannelManager::create_inbound_payment_for_hash`]: crate::ln::channelmanager::ChannelManager::create_inbound_payment_for_hash
payment_secret: PaymentSecret,
},
+ /// A payment for a BOLT 12 [`Offer`].
+ ///
+ /// [`Offer`]: crate::offers::offer::Offer
+ Bolt12OfferPayment {
+ /// The preimage to the payment hash. When handling [`Event::PaymentClaimable`], this can be
+ /// passed directly to [`ChannelManager::claim_funds`], if provided. No action is needed
+ /// when seen in [`Event::PaymentClaimed`].
+ ///
+ /// [`ChannelManager::claim_funds`]: crate::ln::channelmanager::ChannelManager::claim_funds
+ payment_preimage: Option<PaymentPreimage>,
+ /// The secret used to authenticate the sender to the recipient, preventing a number of
+ /// de-anonymization attacks while routing a payment.
+ ///
+ /// See [`PaymentPurpose::Bolt11InvoicePayment::payment_secret`] for further details.
+ payment_secret: PaymentSecret,
+ /// The context of the payment such as information about the corresponding [`Offer`] and
+ /// [`InvoiceRequest`].
+ ///
+ /// [`Offer`]: crate::offers::offer::Offer
+ /// [`InvoiceRequest`]: crate::offers::invoice_request::InvoiceRequest
+ payment_context: Bolt12OfferContext,
+ },
+ /// A payment for a BOLT 12 [`Refund`].
+ ///
+ /// [`Refund`]: crate::offers::refund::Refund
+ Bolt12RefundPayment {
+ /// The preimage to the payment hash. When handling [`Event::PaymentClaimable`], this can be
+ /// passed directly to [`ChannelManager::claim_funds`], if provided. No action is needed
+ /// when seen in [`Event::PaymentClaimed`].
+ ///
+ /// [`ChannelManager::claim_funds`]: crate::ln::channelmanager::ChannelManager::claim_funds
+ payment_preimage: Option<PaymentPreimage>,
+ /// The secret used to authenticate the sender to the recipient, preventing a number of
+ /// de-anonymization attacks while routing a payment.
+ ///
+ /// See [`PaymentPurpose::Bolt11InvoicePayment::payment_secret`] for further details.
+ payment_secret: PaymentSecret,
+ /// The context of the payment such as information about the corresponding [`Refund`].
+ ///
+ /// [`Refund`]: crate::offers::refund::Refund
+ payment_context: Bolt12RefundContext,
+ },
/// Because this is a spontaneous payment, the payer generated their own preimage rather than us
/// (the payee) providing a preimage.
SpontaneousPayment(PaymentPreimage),
/// Returns the preimage for this payment, if it is known.
pub fn preimage(&self) -> Option<PaymentPreimage> {
match self {
- PaymentPurpose::InvoicePayment { payment_preimage, .. } => *payment_preimage,
+ PaymentPurpose::Bolt11InvoicePayment { payment_preimage, .. } => *payment_preimage,
+ PaymentPurpose::Bolt12OfferPayment { payment_preimage, .. } => *payment_preimage,
+ PaymentPurpose::Bolt12RefundPayment { payment_preimage, .. } => *payment_preimage,
PaymentPurpose::SpontaneousPayment(preimage) => Some(*preimage),
}
}
+
+ /// Returns `true` only for spontaneous (keysend) payments, i.e. payments
+ /// that were not made against a BOLT 11 or BOLT 12 invoice.
+ pub(crate) fn is_keysend(&self) -> bool {
+ match self {
+ PaymentPurpose::Bolt11InvoicePayment { .. } => false,
+ PaymentPurpose::Bolt12OfferPayment { .. } => false,
+ PaymentPurpose::Bolt12RefundPayment { .. } => false,
+ PaymentPurpose::SpontaneousPayment(..) => true,
+ }
+ }
+
+ /// Builds a `PaymentPurpose` from its constituent parts.
+ ///
+ /// A missing or `Unknown` `payment_context` maps to `Bolt11InvoicePayment`;
+ /// a BOLT 12 offer or refund context carries through to the corresponding
+ /// `Bolt12OfferPayment` / `Bolt12RefundPayment` variant.
+ pub(crate) fn from_parts(
+ payment_preimage: Option<PaymentPreimage>, payment_secret: PaymentSecret,
+ payment_context: Option<PaymentContext>,
+ ) -> Self {
+ match payment_context {
+ Some(PaymentContext::Unknown(_)) | None => {
+ PaymentPurpose::Bolt11InvoicePayment {
+ payment_preimage,
+ payment_secret,
+ }
+ },
+ Some(PaymentContext::Bolt12Offer(context)) => {
+ PaymentPurpose::Bolt12OfferPayment {
+ payment_preimage,
+ payment_secret,
+ payment_context: context,
+ }
+ },
+ Some(PaymentContext::Bolt12Refund(context)) => {
+ PaymentPurpose::Bolt12RefundPayment {
+ payment_preimage,
+ payment_secret,
+ payment_context: context,
+ }
+ },
+ }
+ }
}
impl_writeable_tlv_based_enum!(PaymentPurpose,
- (0, InvoicePayment) => {
+ (0, Bolt11InvoicePayment) => {
(0, payment_preimage, option),
(2, payment_secret, required),
- };
+ },
+ (4, Bolt12OfferPayment) => {
+ (0, payment_preimage, option),
+ (2, payment_secret, required),
+ (4, payment_context, required),
+ },
+ (6, Bolt12RefundPayment) => {
+ (0, payment_preimage, option),
+ (2, payment_secret, required),
+ (4, payment_context, required),
+ },
+ ;
(2, SpontaneousPayment)
);
HolderForceClosed,
/// The channel was closed after negotiating a cooperative close and we've now broadcasted
/// the cooperative close transaction. Note the shutdown may have been initiated by us.
- //TODO: split between CounterpartyInitiated/LocallyInitiated
- CooperativeClosure,
+ ///
+ /// This was only set in versions of LDK prior to 0.0.122.
+ // Can be removed once we disallow downgrading to 0.0.121
+ LegacyCooperativeClosure,
+ /// The channel was closed after negotiating a cooperative close and we've now broadcasted
+ /// the cooperative close transaction. This indicates that the shutdown was initiated by our
+ /// counterparty.
+ ///
+ /// In rare cases where we initiated closure immediately prior to shutting down without
+ /// persisting, this value may be provided for channels we initiated closure for.
+ CounterpartyInitiatedCooperativeClosure,
+ /// The channel was closed after negotiating a cooperative close and we've now broadcasted
+ /// the cooperative close transaction. This indicates that the shutdown was initiated by us.
+ LocallyInitiatedCooperativeClosure,
/// A commitment transaction was confirmed on chain, closing the channel. Most likely this
/// commitment transaction came from our counterparty, but it may also have come from
/// a copy of our own `ChannelMonitor`.
/// Another channel in the same funding batch closed before the funding transaction
/// was ready to be broadcast.
FundingBatchClosure,
+ /// One of our HTLCs timed out in a channel, causing us to force close the channel.
+ HTLCsTimedOut,
}
impl core::fmt::Display for ClosureReason {
ClosureReason::CounterpartyForceClosed { peer_msg } => {
f.write_fmt(format_args!("counterparty force-closed with message: {}", peer_msg))
},
- ClosureReason::HolderForceClosed => f.write_str("user manually force-closed the channel"),
- ClosureReason::CooperativeClosure => f.write_str("the channel was cooperatively closed"),
+ ClosureReason::HolderForceClosed => f.write_str("user force-closed the channel"),
+ ClosureReason::LegacyCooperativeClosure => f.write_str("the channel was cooperatively closed"),
+ ClosureReason::CounterpartyInitiatedCooperativeClosure => f.write_str("the channel was cooperatively closed by our peer"),
+ ClosureReason::LocallyInitiatedCooperativeClosure => f.write_str("the channel was cooperatively closed by us"),
ClosureReason::CommitmentTxConfirmed => f.write_str("commitment or closing transaction was confirmed on chain."),
ClosureReason::FundingTimedOut => write!(f, "funding transaction failed to confirm within {} blocks", FUNDING_CONF_DEADLINE_BLOCKS),
ClosureReason::ProcessingError { err } => {
ClosureReason::OutdatedChannelManager => f.write_str("the ChannelManager read from disk was stale compared to ChannelMonitor(s)"),
ClosureReason::CounterpartyCoopClosedUnfundedChannel => f.write_str("the peer requested the unfunded channel be closed"),
ClosureReason::FundingBatchClosure => f.write_str("another channel in the same funding batch closed"),
+ ClosureReason::HTLCsTimedOut => f.write_str("htlcs on the channel timed out"),
}
}
}
(1, FundingTimedOut) => {},
(2, HolderForceClosed) => {},
(6, CommitmentTxConfirmed) => {},
- (4, CooperativeClosure) => {},
+ (4, LegacyCooperativeClosure) => {},
(8, ProcessingError) => { (1, err, required) },
(10, DisconnectedPeer) => {},
(12, OutdatedChannelManager) => {},
(13, CounterpartyCoopClosedUnfundedChannel) => {},
- (15, FundingBatchClosure) => {}
+ (15, FundingBatchClosure) => {},
+ (17, CounterpartyInitiatedCooperativeClosure) => {},
+ (19, LocallyInitiatedCooperativeClosure) => {},
+ (21, HTLCsTimedOut) => {},
);
/// Intended destination of a failed HTLC as indicated in [`Event::HTLCHandlingFailed`].
/// Short channel id we are requesting to forward an HTLC to.
requested_forward_scid: u64
},
+ /// We couldn't decode the incoming onion to obtain the forwarding details.
+ InvalidOnion,
/// Failure scenario where an HTLC may have been forwarded to be intended for us,
/// but is invalid for some reason, so we reject it.
///
(2, UnknownNextHop) => {
(0, requested_forward_scid, required),
},
+ (3, InvalidOnion) => {},
(4, FailedPayment) => {
(0, payment_hash, required),
},
/// [`PaymentParameters::expiry_time`]: crate::routing::router::PaymentParameters::expiry_time
PaymentExpired,
/// We failed to find a route while retrying the payment.
+ ///
+ /// Note that this generally indicates that we've exhausted the available set of possible
+ /// routes - we tried the payment over a few routes but were not able to find any further
+ /// candidate routes beyond those.
RouteNotFound,
/// This error should generally never happen. This likely means that there is a problem with
/// your router.
},
/// Used to indicate that an output which you should know how to spend was confirmed on chain
/// and is now spendable.
- /// Such an output will *not* ever be spent by rust-lightning, and are not at risk of your
+ ///
+ /// Such an output will *never* be spent directly by LDK, and are not at risk of your
/// counterparty spending them due to some kind of timeout. Thus, you need to store them
/// somewhere and spend them when you create on-chain transactions.
+ ///
+ /// You may hand them to the [`OutputSweeper`] utility which will store and (re-)generate spending
+ /// transactions for you.
+ ///
+ /// [`OutputSweeper`]: crate::util::sweep::OutputSweeper
SpendableOutputs {
/// The outputs which you should store as spendable by you.
outputs: Vec<SpendableOutputDescriptor>,
/// This event is generated when a payment has been successfully forwarded through us and a
/// forwarding fee earned.
PaymentForwarded {
- /// The incoming channel between the previous node and us. This is only `None` for events
- /// generated or serialized by versions prior to 0.0.107.
+ /// The channel id of the incoming channel between the previous node and us.
+ ///
+ /// This is only `None` for events generated or serialized by versions prior to 0.0.107.
prev_channel_id: Option<ChannelId>,
- /// The outgoing channel between the next node and us. This is only `None` for events
- /// generated or serialized by versions prior to 0.0.107.
+ /// The channel id of the outgoing channel between the next node and us.
+ ///
+ /// This is only `None` for events generated or serialized by versions prior to 0.0.107.
next_channel_id: Option<ChannelId>,
- /// The fee, in milli-satoshis, which was earned as a result of the payment.
+ /// The `user_channel_id` of the incoming channel between the previous node and us.
+ ///
+ /// This is only `None` for events generated or serialized by versions prior to 0.0.122.
+ prev_user_channel_id: Option<u128>,
+ /// The `user_channel_id` of the outgoing channel between the next node and us.
+ ///
+ /// This will be `None` if the payment was settled via an on-chain transaction. See the
+ /// caveat described for the `total_fee_earned_msat` field. Moreover it will be `None` for
+ /// events generated or serialized by versions prior to 0.0.122.
+ next_user_channel_id: Option<u128>,
+ /// The total fee, in milli-satoshis, which was earned as a result of the payment.
///
/// Note that if we force-closed the channel over which we forwarded an HTLC while the HTLC
/// was pending, the amount the next hop claimed will have been rounded down to the nearest
/// If the channel which sent us the payment has been force-closed, we will claim the funds
/// via an on-chain transaction. In that case we do not yet know the on-chain transaction
/// fees which we will spend and will instead set this to `None`. It is possible duplicate
- /// `PaymentForwarded` events are generated for the same payment iff `fee_earned_msat` is
+ /// `PaymentForwarded` events are generated for the same payment iff `total_fee_earned_msat` is
/// `None`.
- fee_earned_msat: Option<u64>,
+ total_fee_earned_msat: Option<u64>,
+ /// The share of the total fee, in milli-satoshis, which was withheld in addition to the
+ /// forwarding fee.
+ ///
+ /// This will only be `Some` if we forwarded an intercepted HTLC with less than the
+ /// expected amount. This means our counterparty accepted to receive less than the invoice
+ /// amount, e.g., by claiming the payment featuring a corresponding
+ /// [`PaymentClaimable::counterparty_skimmed_fee_msat`].
+ ///
+ /// Will also always be `None` for events serialized with LDK prior to version 0.0.122.
+ ///
+ /// The caveat described above the `total_fee_earned_msat` field applies here as well.
+ ///
+ /// [`PaymentClaimable::counterparty_skimmed_fee_msat`]: Self::PaymentClaimable::counterparty_skimmed_fee_msat
+ skimmed_fee_msat: Option<u64>,
/// If this is `true`, the forwarded HTLC was claimed by our counterparty via an on-chain
/// transaction.
claim_from_onchain_tx: bool,
/// The final amount forwarded, in milli-satoshis, after the fee is deducted.
///
- /// The caveat described above the `fee_earned_msat` field applies here as well.
+ /// The caveat described above the `total_fee_earned_msat` field applies here as well.
outbound_amount_forwarded_msat: Option<u64>,
},
/// Used to indicate that a channel with the given `channel_id` is being opened and pending
counterparty_node_id: PublicKey,
/// The outpoint of the channel's funding transaction.
funding_txo: OutPoint,
+ /// The features that this channel will operate with.
+ ///
+ /// Will be `None` for channels created prior to LDK version 0.0.122.
+ channel_type: Option<ChannelTypeFeatures>,
},
/// Used to indicate that a channel with the given `channel_id` is ready to
/// be used. This event is emitted either when the funding transaction has been confirmed
/// The features that this channel will operate with.
channel_type: ChannelTypeFeatures,
},
- /// Used to indicate that a previously opened channel with the given `channel_id` is in the
- /// process of closure.
+ /// Used to indicate that a channel that got past the initial handshake with the given `channel_id` is in the
+ /// process of closure. This includes previously opened channels, and channels that time out from not being funded.
///
/// Note that this event is only triggered for accepted channels: if the
/// [`UserConfig::manually_accept_inbound_channels`] config flag is set to true and the channel is
1u8.write(writer)?;
let mut payment_secret = None;
let payment_preimage;
+ let mut payment_context = None;
match &purpose {
- PaymentPurpose::InvoicePayment { payment_preimage: preimage, payment_secret: secret } => {
+ PaymentPurpose::Bolt11InvoicePayment {
+ payment_preimage: preimage, payment_secret: secret
+ } => {
payment_secret = Some(secret);
payment_preimage = *preimage;
},
+ PaymentPurpose::Bolt12OfferPayment {
+ payment_preimage: preimage, payment_secret: secret, payment_context: context
+ } => {
+ payment_secret = Some(secret);
+ payment_preimage = *preimage;
+ payment_context = Some(PaymentContextRef::Bolt12Offer(context));
+ },
+ PaymentPurpose::Bolt12RefundPayment {
+ payment_preimage: preimage, payment_secret: secret, payment_context: context
+ } => {
+ payment_secret = Some(secret);
+ payment_preimage = *preimage;
+ payment_context = Some(PaymentContextRef::Bolt12Refund(context));
+ },
PaymentPurpose::SpontaneousPayment(preimage) => {
payment_preimage = Some(*preimage);
}
(8, payment_preimage, option),
(9, onion_fields, option),
(10, skimmed_fee_opt, option),
+ (11, payment_context, option),
});
},
&Event::PaymentSent { ref payment_id, ref payment_preimage, ref payment_hash, ref fee_paid_msat } => {
});
}
&Event::PaymentForwarded {
- fee_earned_msat, prev_channel_id, claim_from_onchain_tx,
- next_channel_id, outbound_amount_forwarded_msat
+ prev_channel_id, next_channel_id, prev_user_channel_id, next_user_channel_id,
+ total_fee_earned_msat, skimmed_fee_msat, claim_from_onchain_tx,
+ outbound_amount_forwarded_msat,
} => {
7u8.write(writer)?;
write_tlv_fields!(writer, {
- (0, fee_earned_msat, option),
+ (0, total_fee_earned_msat, option),
(1, prev_channel_id, option),
(2, claim_from_onchain_tx, required),
(3, next_channel_id, option),
(5, outbound_amount_forwarded_msat, option),
+ (7, skimmed_fee_msat, option),
+ (9, prev_user_channel_id, option),
+ (11, next_user_channel_id, option),
});
},
&Event::ChannelClosed { ref channel_id, ref user_channel_id, ref reason,
(6, channel_type, required),
});
},
- &Event::ChannelPending { ref channel_id, ref user_channel_id, ref former_temporary_channel_id, ref counterparty_node_id, ref funding_txo } => {
+ &Event::ChannelPending { ref channel_id, ref user_channel_id,
+ ref former_temporary_channel_id, ref counterparty_node_id, ref funding_txo,
+ ref channel_type
+ } => {
31u8.write(writer)?;
write_tlv_fields!(writer, {
(0, channel_id, required),
+ (1, channel_type, option),
(2, user_channel_id, required),
(4, former_temporary_channel_id, required),
(6, counterparty_node_id, required),
// Note that we do not write a length-prefixed TLV for FundingGenerationReady events.
0u8 => Ok(None),
1u8 => {
- let f = || {
+ let mut f = || {
let mut payment_hash = PaymentHash([0; 32]);
let mut payment_preimage = None;
let mut payment_secret = None;
let mut claim_deadline = None;
let mut via_user_channel_id = None;
let mut onion_fields = None;
+ let mut payment_context = None;
read_tlv_fields!(reader, {
(0, payment_hash, required),
(1, receiver_node_id, option),
(8, payment_preimage, option),
(9, onion_fields, option),
(10, counterparty_skimmed_fee_msat_opt, option),
+ (11, payment_context, option),
});
let purpose = match payment_secret {
- Some(secret) => PaymentPurpose::InvoicePayment {
- payment_preimage,
- payment_secret: secret
- },
+ Some(secret) => PaymentPurpose::from_parts(payment_preimage, secret, payment_context),
None if payment_preimage.is_some() => PaymentPurpose::SpontaneousPayment(payment_preimage.unwrap()),
None => return Err(msgs::DecodeError::InvalidValue),
};
f()
},
2u8 => {
- let f = || {
+ let mut f = || {
let mut payment_preimage = PaymentPreimage([0; 32]);
let mut payment_hash = None;
let mut payment_id = None;
f()
},
3u8 => {
- let f = || {
+ let mut f = || {
#[cfg(test)]
let error_code = Readable::read(reader)?;
#[cfg(test)]
},
4u8 => Ok(None),
5u8 => {
- let f = || {
+ let mut f = || {
let mut outputs = WithoutLength(Vec::new());
let mut channel_id: Option<ChannelId> = None;
read_tlv_fields!(reader, {
}))
},
7u8 => {
- let f = || {
- let mut fee_earned_msat = None;
+ let mut f = || {
let mut prev_channel_id = None;
- let mut claim_from_onchain_tx = false;
let mut next_channel_id = None;
+ let mut prev_user_channel_id = None;
+ let mut next_user_channel_id = None;
+ let mut total_fee_earned_msat = None;
+ let mut skimmed_fee_msat = None;
+ let mut claim_from_onchain_tx = false;
let mut outbound_amount_forwarded_msat = None;
read_tlv_fields!(reader, {
- (0, fee_earned_msat, option),
+ (0, total_fee_earned_msat, option),
(1, prev_channel_id, option),
(2, claim_from_onchain_tx, required),
(3, next_channel_id, option),
(5, outbound_amount_forwarded_msat, option),
+ (7, skimmed_fee_msat, option),
+ (9, prev_user_channel_id, option),
+ (11, next_user_channel_id, option),
});
Ok(Some(Event::PaymentForwarded {
- fee_earned_msat, prev_channel_id, claim_from_onchain_tx, next_channel_id,
- outbound_amount_forwarded_msat
+ prev_channel_id, next_channel_id, prev_user_channel_id,
+ next_user_channel_id, total_fee_earned_msat, skimmed_fee_msat,
+ claim_from_onchain_tx, outbound_amount_forwarded_msat,
}))
};
f()
},
9u8 => {
- let f = || {
+ let mut f = || {
let mut channel_id = ChannelId::new_zero();
let mut reason = UpgradableRequired(None);
let mut user_channel_id_low_opt: Option<u64> = None;
f()
},
11u8 => {
- let f = || {
+ let mut f = || {
let mut channel_id = ChannelId::new_zero();
let mut transaction = Transaction{ version: 2, lock_time: LockTime::ZERO, input: Vec::new(), output: Vec::new() };
read_tlv_fields!(reader, {
f()
},
13u8 => {
- let f = || {
+ let mut f = || {
_init_and_read_len_prefixed_tlv_fields!(reader, {
(0, payment_id, required),
(2, payment_hash, option),
f()
},
15u8 => {
- let f = || {
+ let mut f = || {
let mut payment_hash = PaymentHash([0; 32]);
let mut payment_id = PaymentId([0; 32]);
let mut reason = None;
Ok(None)
},
19u8 => {
- let f = || {
+ let mut f = || {
let mut payment_hash = PaymentHash([0; 32]);
let mut purpose = UpgradableRequired(None);
let mut amount_msat = 0;
f()
},
21u8 => {
- let f = || {
+ let mut f = || {
_init_and_read_len_prefixed_tlv_fields!(reader, {
(0, payment_id, required),
(2, payment_hash, required),
f()
},
23u8 => {
- let f = || {
+ let mut f = || {
_init_and_read_len_prefixed_tlv_fields!(reader, {
(0, payment_id, required),
(2, payment_hash, required),
f()
},
25u8 => {
- let f = || {
+ let mut f = || {
let mut prev_channel_id = ChannelId::new_zero();
let mut failed_next_destination_opt = UpgradableRequired(None);
read_tlv_fields!(reader, {
},
27u8 => Ok(None),
29u8 => {
- let f = || {
+ let mut f = || {
let mut channel_id = ChannelId::new_zero();
let mut user_channel_id: u128 = 0;
let mut counterparty_node_id = RequiredWrapper(None);
f()
},
31u8 => {
- let f = || {
+ let mut f = || {
let mut channel_id = ChannelId::new_zero();
let mut user_channel_id: u128 = 0;
let mut former_temporary_channel_id = None;
let mut counterparty_node_id = RequiredWrapper(None);
let mut funding_txo = RequiredWrapper(None);
+ let mut channel_type = None;
read_tlv_fields!(reader, {
(0, channel_id, required),
+ (1, channel_type, option),
(2, user_channel_id, required),
(4, former_temporary_channel_id, required),
(6, counterparty_node_id, required),
user_channel_id,
former_temporary_channel_id,
counterparty_node_id: counterparty_node_id.0.unwrap(),
- funding_txo: funding_txo.0.unwrap()
+ funding_txo: funding_txo.0.unwrap(),
+ channel_type,
}))
};
f()
},
33u8 => {
- let f = || {
+ let mut f = || {
_init_and_read_len_prefixed_tlv_fields!(reader, {
(0, payment_id, required),
});
pub use core2::io;
#[cfg(not(feature = "std"))]
-mod io_extras {
+#[doc(hidden)]
+/// IO utilities public only for use by in-crate macros. These should not be used externally
+pub mod io_extras {
use core2::io::{self, Read, Write};
/// A writer which will move data into the void.
}
#[cfg(feature = "std")]
+#[doc(hidden)]
+/// IO utilities public only for use by in-crate macros. These should not be used externally
mod io_extras {
pub fn read_to_end<D: ::std::io::Read>(mut d: D) -> Result<Vec<u8>, ::std::io::Error> {
let mut buf = Vec::new();
}
mod prelude {
- #[cfg(feature = "hashbrown")]
- extern crate hashbrown;
+ #![allow(unused_imports)]
pub use alloc::{vec, vec::Vec, string::String, collections::VecDeque, boxed::Box};
- #[cfg(not(feature = "hashbrown"))]
- pub use std::collections::{HashMap, HashSet, hash_map};
- #[cfg(feature = "hashbrown")]
- pub use self::hashbrown::{HashMap, HashSet, hash_map};
pub use alloc::borrow::ToOwned;
pub use alloc::string::ToString;
+
+ pub use core::convert::{AsMut, AsRef, TryFrom, TryInto};
+ pub use core::default::Default;
+ pub use core::marker::Sized;
+
+ pub(crate) use crate::util::hash_tables::*;
}
#[cfg(all(not(ldk_bench), feature = "backtrace", feature = "std", test))]
//! Tests for asynchronous signing. These tests verify that the channel state machine behaves
//! properly with a signer implementation that asynchronously derives signatures.
-use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider};
+use bitcoin::{Transaction, TxOut, TxIn, Amount};
+use bitcoin::blockdata::locktime::absolute::LockTime;
+
+use crate::chain::channelmonitor::LATENCY_GRACE_PERIOD_BLOCKS;
+use crate::events::bump_transaction::WalletSource;
+use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider, ClosureReason};
use crate::ln::functional_test_utils::*;
use crate::ln::msgs::ChannelMessageHandler;
use crate::ln::channelmanager::{PaymentId, RecipientOnionFields};
// nodes[0] <-- accept_channel --- nodes[1]
let accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
- assert_eq!(accept_channel.minimum_depth, 0, "Expected minimum depth of 0");
+ assert_eq!(accept_channel.common_fields.minimum_depth, 0, "Expected minimum depth of 0");
nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel);
// nodes[0] --- funding_created --> nodes[1]
};
}
}
+
+fn do_test_async_holder_signatures(anchors: bool, remote_commitment: bool) {
+ // Ensures that we can obtain holder signatures for commitment and HTLC transactions
+ // asynchronously by allowing their retrieval to fail and retrying via
+ // `ChannelMonitor::signer_unblocked`.
+ let mut config = test_default_channel_config();
+ if anchors {
+ config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true;
+ config.manually_accept_inbound_channels = true;
+ }
+
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config), Some(config)]);
+ let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+ let closing_node = if remote_commitment { &nodes[1] } else { &nodes[0] };
+ let coinbase_tx = Transaction {
+ version: 2,
+ lock_time: LockTime::ZERO,
+ input: vec![TxIn { ..Default::default() }],
+ output: vec![
+ TxOut {
+ value: Amount::ONE_BTC.to_sat(),
+ script_pubkey: closing_node.wallet_source.get_change_script().unwrap(),
+ },
+ ],
+ };
+ if anchors {
+ *nodes[0].fee_estimator.sat_per_kw.lock().unwrap() *= 2;
+ *nodes[1].fee_estimator.sat_per_kw.lock().unwrap() *= 2;
+ closing_node.wallet_source.add_utxo(bitcoin::OutPoint { txid: coinbase_tx.txid(), vout: 0 }, coinbase_tx.output[0].value);
+ }
+
+ // Route an HTLC and set the signer as unavailable.
+ let (_, _, chan_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 1);
+ route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
+
+ nodes[0].set_channel_signer_available(&nodes[1].node.get_our_node_id(), &chan_id, false);
+
+ if remote_commitment {
+ // Make the counterparty broadcast its latest commitment.
+ nodes[1].node.force_close_broadcasting_latest_txn(&chan_id, &nodes[0].node.get_our_node_id()).unwrap();
+ check_added_monitors(&nodes[1], 1);
+ check_closed_broadcast(&nodes[1], 1, true);
+ check_closed_event(&nodes[1], 1, ClosureReason::HolderForceClosed, false, &[nodes[0].node.get_our_node_id()], 100_000);
+ } else {
+ // We'll connect blocks until the sender has to go onchain to time out the HTLC.
+ connect_blocks(&nodes[0], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1);
+
+ // No transaction should be broadcast since the signer is not available yet.
+ assert!(nodes[0].tx_broadcaster.txn_broadcast().is_empty());
+ assert!(nodes[0].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty());
+
+ // Mark it as available now, we should see the signed commitment transaction.
+ nodes[0].set_channel_signer_available(&nodes[1].node.get_our_node_id(), &chan_id, true);
+ get_monitor!(nodes[0], chan_id).signer_unblocked(nodes[0].tx_broadcaster, nodes[0].fee_estimator, &nodes[0].logger);
+ }
+
+ let commitment_tx = {
+ let mut txn = closing_node.tx_broadcaster.txn_broadcast();
+ if anchors || remote_commitment {
+ assert_eq!(txn.len(), 1);
+ check_spends!(txn[0], funding_tx);
+ txn.remove(0)
+ } else {
+ assert_eq!(txn.len(), 2);
+ if txn[0].input[0].previous_output.txid == funding_tx.txid() {
+ check_spends!(txn[0], funding_tx);
+ check_spends!(txn[1], txn[0]);
+ txn.remove(0)
+ } else {
+ check_spends!(txn[1], funding_tx);
+ check_spends!(txn[0], txn[1]);
+ txn.remove(1)
+ }
+ }
+ };
+
+ // Mark it as unavailable again to now test the HTLC transaction. We'll mine the commitment such
+ // that the HTLC transaction is retried.
+ nodes[0].set_channel_signer_available(&nodes[1].node.get_our_node_id(), &chan_id, false);
+ mine_transaction(&nodes[0], &commitment_tx);
+
+ check_added_monitors(&nodes[0], 1);
+ check_closed_broadcast(&nodes[0], 1, true);
+ check_closed_event(&nodes[0], 1, ClosureReason::CommitmentTxConfirmed, false, &[nodes[1].node.get_our_node_id()], 100_000);
+
+ // If the counterparty broadcast its latest commitment, we need to mine enough blocks for the
+ // HTLC timeout.
+ if remote_commitment {
+ connect_blocks(&nodes[0], TEST_FINAL_CLTV);
+ }
+
+ // No HTLC transaction should be broadcast as the signer is not available yet.
+ if anchors && !remote_commitment {
+ handle_bump_htlc_event(&nodes[0], 1);
+ }
+ assert!(nodes[0].tx_broadcaster.txn_broadcast().is_empty());
+
+ // Mark it as available now, we should see the signed HTLC transaction.
+ nodes[0].set_channel_signer_available(&nodes[1].node.get_our_node_id(), &chan_id, true);
+ get_monitor!(nodes[0], chan_id).signer_unblocked(nodes[0].tx_broadcaster, nodes[0].fee_estimator, &nodes[0].logger);
+
+ if anchors && !remote_commitment {
+ handle_bump_htlc_event(&nodes[0], 1);
+ }
+ {
+ let txn = nodes[0].tx_broadcaster.txn_broadcast();
+ assert_eq!(txn.len(), 1);
+ check_spends!(txn[0], commitment_tx, coinbase_tx);
+ }
+}
+
+#[test]
+fn test_async_holder_signatures() {
+ do_test_async_holder_signatures(false, false);
+ do_test_async_holder_signatures(false, true);
+ do_test_async_holder_signatures(true, false);
+ do_test_async_holder_signatures(true, true);
+}
use bitcoin::secp256k1::{PublicKey, Secp256k1, SecretKey};
use crate::blinded_path::BlindedPath;
-use crate::blinded_path::payment::{ForwardNode, ForwardTlvs, PaymentConstraints, PaymentRelay, ReceiveTlvs};
+use crate::blinded_path::payment::{ForwardNode, ForwardTlvs, PaymentConstraints, PaymentContext, PaymentRelay, ReceiveTlvs};
use crate::events::{Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider, PaymentFailureReason};
use crate::ln::PaymentSecret;
use crate::ln::channelmanager;
use crate::ln::msgs::ChannelMessageHandler;
use crate::ln::onion_utils;
use crate::ln::onion_utils::INVALID_ONION_BLINDING;
-use crate::ln::outbound_payment::Retry;
+use crate::ln::outbound_payment::{Retry, IDEMPOTENCY_TIMEOUT_TICKS};
use crate::offers::invoice::BlindedPayInfo;
use crate::prelude::*;
use crate::routing::router::{Payee, PaymentParameters, RouteParameters};
use crate::util::test_utils;
fn blinded_payment_path(
- payment_secret: PaymentSecret, node_ids: Vec<PublicKey>,
- channel_upds: &[&msgs::UnsignedChannelUpdate], keys_manager: &test_utils::TestKeysInterface
+ payment_secret: PaymentSecret, intro_node_min_htlc: u64, intro_node_max_htlc: u64,
+ node_ids: Vec<PublicKey>, channel_upds: &[&msgs::UnsignedChannelUpdate],
+ keys_manager: &test_utils::TestKeysInterface
) -> (BlindedPayInfo, BlindedPath) {
let mut intermediate_nodes = Vec::new();
- for (node_id, chan_upd) in node_ids.iter().zip(channel_upds) {
+ let mut intro_node_min_htlc_opt = Some(intro_node_min_htlc);
+ let mut intro_node_max_htlc_opt = Some(intro_node_max_htlc);
+ for (idx, (node_id, chan_upd)) in node_ids.iter().zip(channel_upds).enumerate() {
intermediate_nodes.push(ForwardNode {
node_id: *node_id,
tlvs: ForwardTlvs {
},
payment_constraints: PaymentConstraints {
max_cltv_expiry: u32::max_value(),
- htlc_minimum_msat: chan_upd.htlc_minimum_msat,
+ htlc_minimum_msat: intro_node_min_htlc_opt.take()
+ .unwrap_or_else(|| channel_upds[idx - 1].htlc_minimum_msat),
},
features: BlindedHopFeatures::empty(),
},
- htlc_maximum_msat: chan_upd.htlc_maximum_msat,
+ htlc_maximum_msat: intro_node_max_htlc_opt.take()
+ .unwrap_or_else(|| channel_upds[idx - 1].htlc_maximum_msat),
});
}
let payee_tlvs = ReceiveTlvs {
payment_secret,
payment_constraints: PaymentConstraints {
max_cltv_expiry: u32::max_value(),
- htlc_minimum_msat: channel_upds.last().unwrap().htlc_minimum_msat,
+ htlc_minimum_msat:
+ intro_node_min_htlc_opt.unwrap_or_else(|| channel_upds.last().unwrap().htlc_minimum_msat),
},
+ payment_context: PaymentContext::unknown(),
};
let mut secp_ctx = Secp256k1::new();
BlindedPath::new_for_payment(
&intermediate_nodes[..], *node_ids.last().unwrap(), payee_tlvs,
- channel_upds.last().unwrap().htlc_maximum_msat, keys_manager, &secp_ctx
+ intro_node_max_htlc_opt.unwrap_or_else(|| channel_upds.last().unwrap().htlc_maximum_msat),
+ TEST_FINAL_CLTV as u16, keys_manager, &secp_ctx
).unwrap()
}
pub fn get_blinded_route_parameters(
- amt_msat: u64, payment_secret: PaymentSecret, node_ids: Vec<PublicKey>,
- channel_upds: &[&msgs::UnsignedChannelUpdate], keys_manager: &test_utils::TestKeysInterface
+ amt_msat: u64, payment_secret: PaymentSecret, intro_node_min_htlc: u64, intro_node_max_htlc: u64,
+ node_ids: Vec<PublicKey>, channel_upds: &[&msgs::UnsignedChannelUpdate],
+ keys_manager: &test_utils::TestKeysInterface
) -> RouteParameters {
RouteParameters::from_payment_params_and_value(
PaymentParameters::blinded(vec![
- blinded_payment_path(payment_secret, node_ids, channel_upds, keys_manager)
+ blinded_payment_path(
+ payment_secret, intro_node_min_htlc, intro_node_max_htlc, node_ids, channel_upds,
+ keys_manager
+ )
]), amt_msat
)
}
max_cltv_expiry: u32::max_value(),
htlc_minimum_msat: chan_upd.htlc_minimum_msat,
},
+ payment_context: PaymentContext::unknown(),
};
let mut secp_ctx = Secp256k1::new();
let blinded_path = BlindedPath::one_hop_for_payment(
- nodes[1].node.get_our_node_id(), payee_tlvs, &chanmon_cfgs[1].keys_manager, &secp_ctx
+ nodes[1].node.get_our_node_id(), payee_tlvs, TEST_FINAL_CLTV as u16,
+ &chanmon_cfgs[1].keys_manager, &secp_ctx
).unwrap();
let route_params = RouteParameters::from_payment_params_and_value(
max_cltv_expiry: u32::max_value(),
htlc_minimum_msat: chan_upd_1_3.htlc_minimum_msat,
},
+ payment_context: PaymentContext::unknown(),
};
let blinded_path = BlindedPath::one_hop_for_payment(
- nodes[3].node.get_our_node_id(), payee_tlvs, &chanmon_cfgs[3].keys_manager, &secp_ctx
+ nodes[3].node.get_our_node_id(), payee_tlvs, TEST_FINAL_CLTV as u16,
+ &chanmon_cfgs[3].keys_manager, &secp_ctx
).unwrap();
let bolt12_features =
claim_payment_along_route(&nodes[0], expected_route, false, payment_preimage);
}
+#[test]
+fn mpp_to_three_hop_blinded_paths() {
+ let chanmon_cfgs = create_chanmon_cfgs(6);
+ let node_cfgs = create_node_cfgs(6, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(6, &node_cfgs, &[None, None, None, None, None, None]);
+ let nodes = create_network(6, &node_cfgs, &node_chanmgrs);
+
+ // Create this network topology so node 0 MPP's over 2 3-hop blinded paths:
+ // n1 -- n3
+ // / \
+ // n0 n5
+ // \ /
+ // n2 -- n4
+ create_announced_chan_between_nodes(&nodes, 0, 1);
+ create_announced_chan_between_nodes(&nodes, 0, 2);
+ let chan_upd_1_3 = create_announced_chan_between_nodes(&nodes, 1, 3).0.contents;
+ let chan_upd_2_4 = create_announced_chan_between_nodes(&nodes, 2, 4).0.contents;
+ let chan_upd_3_5 = create_announced_chan_between_nodes(&nodes, 3, 5).0.contents;
+ let chan_upd_4_5 = create_announced_chan_between_nodes(&nodes, 4, 5).0.contents;
+
+ let amt_msat = 15_000_000;
+ let (payment_preimage, payment_hash, payment_secret) = get_payment_preimage_hash(&nodes[5], Some(amt_msat), None);
+ let route_params = {
+ let path_1_params = get_blinded_route_parameters(
+ amt_msat, payment_secret, 1, 1_0000_0000, vec![
+ nodes[1].node.get_our_node_id(), nodes[3].node.get_our_node_id(),
+ nodes[5].node.get_our_node_id()
+ ], &[&chan_upd_1_3, &chan_upd_3_5], &chanmon_cfgs[5].keys_manager
+ );
+ let path_2_params = get_blinded_route_parameters(
+ amt_msat, payment_secret, 1, 1_0000_0000, vec![
+ nodes[2].node.get_our_node_id(), nodes[4].node.get_our_node_id(),
+ nodes[5].node.get_our_node_id()
+ ], &[&chan_upd_2_4, &chan_upd_4_5], &chanmon_cfgs[5].keys_manager
+ );
+ let pay_params = PaymentParameters::blinded(
+ vec![
+ path_1_params.payment_params.payee.blinded_route_hints()[0].clone(),
+ path_2_params.payment_params.payee.blinded_route_hints()[0].clone()
+ ]
+ )
+ .with_bolt12_features(channelmanager::provided_bolt12_invoice_features(&UserConfig::default()))
+ .unwrap();
+ RouteParameters::from_payment_params_and_value(pay_params, amt_msat)
+ };
+
+ nodes[0].node.send_payment(payment_hash, RecipientOnionFields::spontaneous_empty(),
+ PaymentId(payment_hash.0), route_params, Retry::Attempts(0)).unwrap();
+ check_added_monitors(&nodes[0], 2);
+
+ let expected_route: &[&[&Node]] = &[&[&nodes[1], &nodes[3], &nodes[5]], &[&nodes[2], &nodes[4], &nodes[5]]];
+ let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 2);
+
+ let ev = remove_first_msg_event_to_node(&nodes[1].node.get_our_node_id(), &mut events);
+ pass_along_path(&nodes[0], expected_route[0], amt_msat, payment_hash.clone(),
+ Some(payment_secret), ev.clone(), false, None);
+
+ let ev = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut events);
+ pass_along_path(&nodes[0], expected_route[1], amt_msat, payment_hash.clone(),
+ Some(payment_secret), ev.clone(), true, None);
+ claim_payment_along_route(&nodes[0], expected_route, false, payment_preimage);
+}
+
enum ForwardCheckFail {
// Fail a check on the inbound onion payload. In this case, we underflow when calculating the
// outgoing cltv_expiry.
#[test]
fn forward_checks_failure() {
- do_forward_checks_failure(ForwardCheckFail::InboundOnionCheck);
- do_forward_checks_failure(ForwardCheckFail::ForwardPayloadEncodedAsReceive);
- do_forward_checks_failure(ForwardCheckFail::OutboundChannelCheck);
+ do_forward_checks_failure(ForwardCheckFail::InboundOnionCheck, true);
+ do_forward_checks_failure(ForwardCheckFail::InboundOnionCheck, false);
+ do_forward_checks_failure(ForwardCheckFail::ForwardPayloadEncodedAsReceive, true);
+ do_forward_checks_failure(ForwardCheckFail::ForwardPayloadEncodedAsReceive, false);
+ do_forward_checks_failure(ForwardCheckFail::OutboundChannelCheck, true);
+ do_forward_checks_failure(ForwardCheckFail::OutboundChannelCheck, false);
}
-fn do_forward_checks_failure(check: ForwardCheckFail) {
+fn do_forward_checks_failure(check: ForwardCheckFail, intro_fails: bool) {
// Ensure we'll fail backwards properly if a forwarding check fails on initial update_add
// receipt.
- let chanmon_cfgs = create_chanmon_cfgs(3);
- let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
- let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
- let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+ let chanmon_cfgs = create_chanmon_cfgs(4);
+ let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
+ let mut nodes = create_network(4, &node_cfgs, &node_chanmgrs);
// We need the session priv to construct a bogus onion packet later.
*nodes[0].keys_manager.override_random_bytes.lock().unwrap() = Some([3; 32]);
create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 0);
let chan_upd_1_2 = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 1_000_000, 0).0.contents;
+ let chan_upd_2_3 = create_announced_chan_between_nodes_with_value(&nodes, 2, 3, 1_000_000, 0).0.contents;
let amt_msat = 5000;
- let (_, payment_hash, payment_secret) = get_payment_preimage_hash(&nodes[2], Some(amt_msat), None);
- let route_params = get_blinded_route_parameters(amt_msat, payment_secret,
- nodes.iter().skip(1).map(|n| n.node.get_our_node_id()).collect(), &[&chan_upd_1_2],
- &chanmon_cfgs[2].keys_manager);
+ let (_, payment_hash, payment_secret) = get_payment_preimage_hash(&nodes[3], Some(amt_msat), None);
+ let route_params = get_blinded_route_parameters(amt_msat, payment_secret, 1, 1_0000_0000,
+ nodes.iter().skip(1).map(|n| n.node.get_our_node_id()).collect(),
+ &[&chan_upd_1_2, &chan_upd_2_3], &chanmon_cfgs[3].keys_manager);
let route = get_route(&nodes[0], &route_params).unwrap();
node_cfgs[0].router.expect_find_route(route_params.clone(), Ok(route.clone()));
nodes[0].node.send_payment(payment_hash, RecipientOnionFields::spontaneous_empty(), PaymentId(payment_hash.0), route_params, Retry::Attempts(0)).unwrap();
check_added_monitors(&nodes[0], 1);
- let mut events = nodes[0].node.get_and_clear_pending_msg_events();
- assert_eq!(events.len(), 1);
- let ev = remove_first_msg_event_to_node(&nodes[1].node.get_our_node_id(), &mut events);
- let mut payment_event = SendEvent::from_event(ev);
+ macro_rules! cause_error {
+ ($src_node_idx: expr, $target_node_idx: expr, $update_add: expr) => {
+ match check {
+ ForwardCheckFail::InboundOnionCheck => {
+ $update_add.cltv_expiry = 10; // causes outbound CLTV expiry to underflow
+ },
+ ForwardCheckFail::ForwardPayloadEncodedAsReceive => {
+ let session_priv = SecretKey::from_slice(&[3; 32]).unwrap();
+ let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route.paths[0], &session_priv).unwrap();
+ let cur_height = nodes[0].best_block_info().1;
+ let (mut onion_payloads, ..) = onion_utils::build_onion_payloads(
+ &route.paths[0], amt_msat, RecipientOnionFields::spontaneous_empty(), cur_height, &None).unwrap();
+ // Remove the receive payload so the blinded forward payload is encoded as a final payload
+ // (i.e. next_hop_hmac == [0; 32])
+ onion_payloads.pop();
+ if $target_node_idx + 1 < nodes.len() {
+ onion_payloads.pop();
+ }
+ $update_add.onion_routing_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &payment_hash).unwrap();
+ },
+ ForwardCheckFail::OutboundChannelCheck => {
+ // The intro node will see that the next-hop peer is disconnected and fail the HTLC backwards.
+ nodes[$src_node_idx].node.peer_disconnected(&nodes[$target_node_idx].node.get_our_node_id());
+ }
+ }
+ }
+ }
- let mut update_add = &mut payment_event.msgs[0];
- match check {
- ForwardCheckFail::InboundOnionCheck => {
- update_add.cltv_expiry = 10; // causes outbound CLTV expiry to underflow
- },
- ForwardCheckFail::ForwardPayloadEncodedAsReceive => {
- let session_priv = SecretKey::from_slice(&[3; 32]).unwrap();
- let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route.paths[0], &session_priv).unwrap();
- let cur_height = nodes[0].best_block_info().1;
- let (mut onion_payloads, ..) = onion_utils::build_onion_payloads(
- &route.paths[0], amt_msat, RecipientOnionFields::spontaneous_empty(), cur_height, &None).unwrap();
- // Remove the receive payload so the blinded forward payload is encoded as a final payload
- // (i.e. next_hop_hmac == [0; 32])
- onion_payloads.pop();
- update_add.onion_routing_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &payment_hash).unwrap();
- },
- ForwardCheckFail::OutboundChannelCheck => {
- // The intro node will see that the next-hop peer is disconnected and fail the HTLC backwards.
- nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id());
- },
+ let mut updates_0_1 = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+ let update_add = &mut updates_0_1.update_add_htlcs[0];
+
+ if intro_fails {
+ cause_error!(1, 2, update_add);
}
- nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
+
+ nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &update_add);
check_added_monitors!(nodes[1], 0);
- do_commitment_signed_dance(&nodes[1], &nodes[0], &payment_event.commitment_msg, true, true);
+ do_commitment_signed_dance(&nodes[1], &nodes[0], &updates_0_1.commitment_signed, true, true);
+
+ if intro_fails {
+ let mut updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+ nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
+ do_commitment_signed_dance(&nodes[0], &nodes[1], &updates.commitment_signed, false, false);
+ expect_payment_failed_conditions(&nodes[0], payment_hash, false,
+ PaymentFailedConditions::new().expected_htlc_error_data(INVALID_ONION_BLINDING, &[0; 32]));
+ return
+ }
+
+ expect_pending_htlcs_forwardable!(nodes[1]);
+ check_added_monitors!(nodes[1], 1);
+
+ let mut updates_1_2 = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id());
+ let mut update_add = &mut updates_1_2.update_add_htlcs[0];
+
+ cause_error!(2, 3, update_add);
+
+ nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &update_add);
+ check_added_monitors!(nodes[2], 0);
+ do_commitment_signed_dance(&nodes[2], &nodes[1], &updates_1_2.commitment_signed, true, true);
+
+ let mut updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
+ let update_malformed = &mut updates.update_fail_malformed_htlcs[0];
+ assert_eq!(update_malformed.failure_code, INVALID_ONION_BLINDING);
+ assert_eq!(update_malformed.sha256_of_onion, [0; 32]);
+
+ // Ensure the intro node will properly blind the error if its downstream node failed to do so.
+ update_malformed.sha256_of_onion = [1; 32];
+ update_malformed.failure_code = INVALID_ONION_BLINDING ^ 1;
+ nodes[1].node.handle_update_fail_malformed_htlc(&nodes[2].node.get_our_node_id(), update_malformed);
+ do_commitment_signed_dance(&nodes[1], &nodes[2], &updates.commitment_signed, true, false);
let mut updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
let amt_msat = 5000;
let (_, payment_hash, payment_secret) = get_payment_preimage_hash(&nodes[2], Some(amt_msat), None);
- let route_params = get_blinded_route_parameters(amt_msat, payment_secret,
+ let route_params = get_blinded_route_parameters(amt_msat, payment_secret, 1, 1_0000_0000,
nodes.iter().skip(1).map(|n| n.node.get_our_node_id()).collect(), &[&chan_upd_1_2],
&chanmon_cfgs[2].keys_manager);
#[test]
fn forward_fail_in_process_pending_htlc_fwds() {
-	do_forward_fail_in_process_pending_htlc_fwds(ProcessPendingHTLCsCheck::FwdPeerDisconnected);
-	do_forward_fail_in_process_pending_htlc_fwds(ProcessPendingHTLCsCheck::FwdChannelClosed);
+	// The added bool selects where the forward fails: `true` at the intro node, `false` at a
+	// later blinded forwarding hop.
+	do_forward_fail_in_process_pending_htlc_fwds(ProcessPendingHTLCsCheck::FwdPeerDisconnected, true);
+	do_forward_fail_in_process_pending_htlc_fwds(ProcessPendingHTLCsCheck::FwdPeerDisconnected, false);
+	do_forward_fail_in_process_pending_htlc_fwds(ProcessPendingHTLCsCheck::FwdChannelClosed, true);
+	do_forward_fail_in_process_pending_htlc_fwds(ProcessPendingHTLCsCheck::FwdChannelClosed, false);
}
-fn do_forward_fail_in_process_pending_htlc_fwds(check: ProcessPendingHTLCsCheck) {
+fn do_forward_fail_in_process_pending_htlc_fwds(check: ProcessPendingHTLCsCheck, intro_fails: bool) {
// Ensure the intro node will error backwards properly if the HTLC fails in
// process_pending_htlc_forwards.
- let chanmon_cfgs = create_chanmon_cfgs(3);
- let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
- let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
- let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+ let chanmon_cfgs = create_chanmon_cfgs(4);
+ let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
+ let mut nodes = create_network(4, &node_cfgs, &node_chanmgrs);
create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 0);
- let (chan_upd_1_2, channel_id) = {
+ let (chan_upd_1_2, chan_id_1_2) = {
let chan = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 1_000_000, 0);
(chan.0.contents, chan.2)
};
+ let (chan_upd_2_3, chan_id_2_3) = {
+ let chan = create_announced_chan_between_nodes_with_value(&nodes, 2, 3, 1_000_000, 0);
+ (chan.0.contents, chan.2)
+ };
let amt_msat = 5000;
let (_, payment_hash, payment_secret) = get_payment_preimage_hash(&nodes[2], Some(amt_msat), None);
- let route_params = get_blinded_route_parameters(amt_msat, payment_secret,
- nodes.iter().skip(1).map(|n| n.node.get_our_node_id()).collect(), &[&chan_upd_1_2],
+ let route_params = get_blinded_route_parameters(amt_msat, payment_secret, 1, 1_0000_0000,
+ nodes.iter().skip(1).map(|n| n.node.get_our_node_id()).collect(), &[&chan_upd_1_2, &chan_upd_2_3],
&chanmon_cfgs[2].keys_manager);
nodes[0].node.send_payment(payment_hash, RecipientOnionFields::spontaneous_empty(), PaymentId(payment_hash.0), route_params, Retry::Attempts(0)).unwrap();
check_added_monitors!(nodes[1], 0);
do_commitment_signed_dance(&nodes[1], &nodes[0], &payment_event.commitment_msg, false, false);
- match check {
- ProcessPendingHTLCsCheck::FwdPeerDisconnected => {
- // Disconnect the next-hop peer so when we go to forward in process_pending_htlc_forwards, the
- // intro node will error backwards.
- nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id());
- expect_pending_htlcs_forwardable!(nodes[1]);
- expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1],
- vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id }]);
- },
- ProcessPendingHTLCsCheck::FwdChannelClosed => {
- // Force close the next-hop channel so when we go to forward in process_pending_htlc_forwards,
- // the intro node will error backwards.
- nodes[1].node.force_close_broadcasting_latest_txn(&channel_id, &nodes[2].node.get_our_node_id()).unwrap();
- let events = nodes[1].node.get_and_clear_pending_events();
- match events[0] {
- crate::events::Event::PendingHTLCsForwardable { .. } => {},
- _ => panic!("Unexpected event {:?}", events),
- };
- match events[1] {
- crate::events::Event::ChannelClosed { .. } => {},
- _ => panic!("Unexpected event {:?}", events),
+ macro_rules! cause_error {
+ ($prev_node: expr, $curr_node: expr, $next_node: expr, $failed_chan_id: expr, $failed_scid: expr) => {
+ match check {
+ ProcessPendingHTLCsCheck::FwdPeerDisconnected => {
+ // Disconnect the next-hop peer so when we go to forward in process_pending_htlc_forwards, the
+ // intro node will error backwards.
+ $curr_node.node.peer_disconnected(&$next_node.node.get_our_node_id());
+ expect_pending_htlcs_forwardable!($curr_node);
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!($curr_node,
+ vec![HTLCDestination::NextHopChannel { node_id: Some($next_node.node.get_our_node_id()), channel_id: $failed_chan_id }]);
+ },
+ ProcessPendingHTLCsCheck::FwdChannelClosed => {
+ // Force close the next-hop channel so when we go to forward in process_pending_htlc_forwards,
+ // the intro node will error backwards.
+ $curr_node.node.force_close_broadcasting_latest_txn(&$failed_chan_id, &$next_node.node.get_our_node_id()).unwrap();
+ let events = $curr_node.node.get_and_clear_pending_events();
+ match events[0] {
+ crate::events::Event::PendingHTLCsForwardable { .. } => {},
+ _ => panic!("Unexpected event {:?}", events),
+ };
+ match events[1] {
+ crate::events::Event::ChannelClosed { .. } => {},
+ _ => panic!("Unexpected event {:?}", events),
+ }
+
+ $curr_node.node.process_pending_htlc_forwards();
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!($curr_node,
+ vec![HTLCDestination::UnknownNextHop { requested_forward_scid: $failed_scid }]);
+ check_closed_broadcast(&$curr_node, 1, true);
+ check_added_monitors!($curr_node, 1);
+ $curr_node.node.process_pending_htlc_forwards();
+ },
}
+ }
+ }
- nodes[1].node.process_pending_htlc_forwards();
- expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1],
- vec![HTLCDestination::UnknownNextHop { requested_forward_scid: chan_upd_1_2.short_channel_id }]);
- check_closed_broadcast(&nodes[1], 1, true);
- check_added_monitors!(nodes[1], 1);
- nodes[1].node.process_pending_htlc_forwards();
- },
+ if intro_fails {
+ cause_error!(nodes[0], nodes[1], nodes[2], chan_id_1_2, chan_upd_1_2.short_channel_id);
+ let mut updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+ nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
+ check_added_monitors!(nodes[1], 1);
+ do_commitment_signed_dance(&nodes[0], &nodes[1], &updates.commitment_signed, false, false);
+
+ expect_payment_failed_conditions(&nodes[0], payment_hash, false,
+ PaymentFailedConditions::new().expected_htlc_error_data(INVALID_ONION_BLINDING, &[0; 32]));
+ return
}
+ expect_pending_htlcs_forwardable!(nodes[1]);
+ check_added_monitors!(nodes[1], 1);
+
+ let mut updates_1_2 = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id());
+ let mut update_add = &mut updates_1_2.update_add_htlcs[0];
+ nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &update_add);
+ check_added_monitors!(nodes[2], 0);
+ do_commitment_signed_dance(&nodes[2], &nodes[1], &updates_1_2.commitment_signed, true, true);
+
+ cause_error!(nodes[1], nodes[2], nodes[3], chan_id_2_3, chan_upd_2_3.short_channel_id);
+ check_added_monitors!(nodes[2], 1);
+
+ let mut updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
+ let update_malformed = &mut updates.update_fail_malformed_htlcs[0];
+ assert_eq!(update_malformed.failure_code, INVALID_ONION_BLINDING);
+ assert_eq!(update_malformed.sha256_of_onion, [0; 32]);
+
+ // Ensure the intro node will properly blind the error if its downstream node failed to do so.
+ update_malformed.sha256_of_onion = [1; 32];
+ update_malformed.failure_code = INVALID_ONION_BLINDING ^ 1;
+ nodes[1].node.handle_update_fail_malformed_htlc(&nodes[2].node.get_our_node_id(), update_malformed);
+ do_commitment_signed_dance(&nodes[1], &nodes[2], &updates.commitment_signed, true, false);
+
let mut updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
- check_added_monitors!(nodes[1], 1);
do_commitment_signed_dance(&nodes[0], &nodes[1], &updates.commitment_signed, false, false);
-
expect_payment_failed_conditions(&nodes[0], payment_hash, false,
PaymentFailedConditions::new().expected_htlc_error_data(INVALID_ONION_BLINDING, &[0; 32]));
}
let intercept_scid = nodes[1].node.get_intercept_scid();
let mut intercept_chan_upd = chan_upd;
intercept_chan_upd.short_channel_id = intercept_scid;
- let route_params = get_blinded_route_parameters(amt_msat, payment_secret,
+ let route_params = get_blinded_route_parameters(amt_msat, payment_secret, 1, 1_0000_0000,
nodes.iter().skip(1).map(|n| n.node.get_our_node_id()).collect(), &[&intercept_chan_upd],
&chanmon_cfgs[2].keys_manager);
let amt_msat = 5000;
let (payment_preimage, payment_hash, payment_secret) = get_payment_preimage_hash(&nodes[2], Some(amt_msat), None);
- let route_params = get_blinded_route_parameters(amt_msat, payment_secret,
+ let route_params = get_blinded_route_parameters(amt_msat, payment_secret, 1, 1_0000_0000,
nodes.iter().skip(1).map(|n| n.node.get_our_node_id()).collect(), &[&chan_upd_1_2],
&chanmon_cfgs[2].keys_manager);
let chan_upd_2_3 = create_announced_chan_between_nodes_with_value(&nodes, 2, 3, 1_000_000, 0).0.contents;
let chan_upd_3_4 = create_announced_chan_between_nodes_with_value(&nodes, 3, 4, 1_000_000, 0).0.contents;
+ // Get all our nodes onto the same height so payments don't fail for CLTV violations.
+ connect_blocks(&nodes[0], nodes[4].best_block_info().1 - nodes[0].best_block_info().1);
+ connect_blocks(&nodes[1], nodes[4].best_block_info().1 - nodes[1].best_block_info().1);
+ connect_blocks(&nodes[2], nodes[4].best_block_info().1 - nodes[2].best_block_info().1);
+ assert_eq!(nodes[4].best_block_info().1, nodes[3].best_block_info().1);
+
let amt_msat = 5000;
let (payment_preimage, payment_hash, payment_secret) = get_payment_preimage_hash(&nodes[4], Some(amt_msat), None);
- let route_params = get_blinded_route_parameters(amt_msat, payment_secret,
+ let route_params = get_blinded_route_parameters(amt_msat, payment_secret, 1, 1_0000_0000,
nodes.iter().skip(2).map(|n| n.node.get_our_node_id()).collect(),
&[&chan_upd_2_3, &chan_upd_3_4], &chanmon_cfgs[4].keys_manager);
claim_payment(&nodes[0], &[&nodes[1], &nodes[2], &nodes[3], &nodes[4]], payment_preimage);
}
+#[test]
+fn three_hop_blinded_path_fail() {
+	// Test that when the recipient of a three-hop blinded path fails the payment, each
+	// intermediate blinded forwarding node is failed back to with `update_fail_malformed_htlc`
+	// and in turn fails back upstream with a malformed error itself; the payer ultimately sees
+	// a blinded (`INVALID_ONION_BLINDING`, zeroed-onion-hash) failure.
+	let chanmon_cfgs = create_chanmon_cfgs(4);
+	let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
+	let mut nodes = create_network(4, &node_cfgs, &node_chanmgrs);
+	create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 0);
+	let chan_upd_1_2 = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 1_000_000, 0).0.contents;
+	let chan_upd_2_3 = create_announced_chan_between_nodes_with_value(&nodes, 2, 3, 1_000_000, 0).0.contents;
+
+	let amt_msat = 5000;
+	let (_, payment_hash, payment_secret) = get_payment_preimage_hash(&nodes[3], Some(amt_msat), None);
+	let route_params = get_blinded_route_parameters(amt_msat, payment_secret, 1, 1_0000_0000,
+		nodes.iter().skip(1).map(|n| n.node.get_our_node_id()).collect(),
+		&[&chan_upd_1_2, &chan_upd_2_3], &chanmon_cfgs[3].keys_manager);
+
+	nodes[0].node.send_payment(payment_hash, RecipientOnionFields::spontaneous_empty(), PaymentId(payment_hash.0), route_params, Retry::Attempts(0)).unwrap();
+	check_added_monitors(&nodes[0], 1);
+	pass_along_route(&nodes[0], &[&[&nodes[1], &nodes[2], &nodes[3]]], amt_msat, payment_hash, payment_secret);
+
+	// The recipient deliberately fails the claimable HTLC to trigger the backwards path.
+	nodes[3].node.fail_htlc_backwards(&payment_hash);
+	expect_pending_htlcs_forwardable_conditions(
+		nodes[3].node.get_and_clear_pending_events(), &[HTLCDestination::FailedPayment { payment_hash }]
+	);
+	nodes[3].node.process_pending_htlc_forwards();
+	check_added_monitors!(nodes[3], 1);
+
+	// Each node inside the blinded path must fail back with an all-zero `sha256_of_onion` and
+	// the `INVALID_ONION_BLINDING` failure code, revealing nothing about the actual failure.
+	let updates_3_2 = get_htlc_update_msgs!(nodes[3], nodes[2].node.get_our_node_id());
+	assert_eq!(updates_3_2.update_fail_malformed_htlcs.len(), 1);
+	let update_malformed = &updates_3_2.update_fail_malformed_htlcs[0];
+	assert_eq!(update_malformed.sha256_of_onion, [0; 32]);
+	assert_eq!(update_malformed.failure_code, INVALID_ONION_BLINDING);
+	nodes[2].node.handle_update_fail_malformed_htlc(&nodes[3].node.get_our_node_id(), update_malformed);
+	do_commitment_signed_dance(&nodes[2], &nodes[3], &updates_3_2.commitment_signed, true, false);
+
+	// The intermediate blinded hop relays the same opaque malformed failure upstream.
+	let updates_2_1 = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
+	assert_eq!(updates_2_1.update_fail_malformed_htlcs.len(), 1);
+	let update_malformed = &updates_2_1.update_fail_malformed_htlcs[0];
+	assert_eq!(update_malformed.sha256_of_onion, [0; 32]);
+	assert_eq!(update_malformed.failure_code, INVALID_ONION_BLINDING);
+	nodes[1].node.handle_update_fail_malformed_htlc(&nodes[2].node.get_our_node_id(), update_malformed);
+	do_commitment_signed_dance(&nodes[1], &nodes[2], &updates_2_1.commitment_signed, true, false);
+
+	// The intro node converts the malformed failure into a regular `update_fail_htlc` for the payer.
+	let updates_1_0 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+	assert_eq!(updates_1_0.update_fail_htlcs.len(), 1);
+	nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates_1_0.update_fail_htlcs[0]);
+	do_commitment_signed_dance(&nodes[0], &nodes[1], &updates_1_0.commitment_signed, false, false);
+	expect_payment_failed_conditions(&nodes[0], payment_hash, false,
+		PaymentFailedConditions::new().expected_htlc_error_data(INVALID_ONION_BLINDING, &[0; 32]));
+}
+
#[derive(PartialEq)]
enum ReceiveCheckFail {
// The recipient fails the payment upon `PaymentClaimable`.
Some(TEST_FINAL_CLTV as u16 - 2)
} else { None };
let (_, payment_hash, payment_secret) = get_payment_preimage_hash(&nodes[2], Some(amt_msat), excess_final_cltv_delta_opt);
- let mut route_params = get_blinded_route_parameters(amt_msat, payment_secret,
+ let mut route_params = get_blinded_route_parameters(amt_msat, payment_secret, 1, 1_0000_0000,
nodes.iter().skip(1).map(|n| n.node.get_our_node_id()).collect(), &[&chan_upd_1_2],
&chanmon_cfgs[2].keys_manager);
let high_htlc_min_bp = {
let mut high_htlc_minimum_upd = chan_upd_1_2.clone();
high_htlc_minimum_upd.htlc_minimum_msat = amt_msat + 1000;
- let high_htlc_min_params = get_blinded_route_parameters(amt_msat, payment_secret,
+ let high_htlc_min_params = get_blinded_route_parameters(amt_msat, payment_secret, 1, 1_0000_0000,
nodes.iter().skip(1).map(|n| n.node.get_our_node_id()).collect(), &[&high_htlc_minimum_upd],
&chanmon_cfgs[2].keys_manager);
if let Payee::Blinded { route_hints, .. } = high_htlc_min_params.payment_params.payee {
commitment_signed_dance!(nodes[2], nodes[1], (), false, true, false, false);
},
ReceiveCheckFail::ProcessPendingHTLCsCheck => {
- assert_eq!(payment_event_1_2.msgs[0].cltv_expiry, nodes[0].best_block_info().1 + 1 + excess_final_cltv_delta_opt.unwrap() as u32);
+ assert_eq!(payment_event_1_2.msgs[0].cltv_expiry, nodes[0].best_block_info().1 + 1 + excess_final_cltv_delta_opt.unwrap() as u32 + TEST_FINAL_CLTV);
nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event_1_2.msgs[0]);
check_added_monitors!(nodes[2], 0);
do_commitment_signed_dance(&nodes[2], &nodes[1], &payment_event_1_2.commitment_msg, true, true);
let route_params = {
let pay_params = PaymentParameters::blinded(
vec![
- blinded_payment_path(payment_secret,
+ blinded_payment_path(payment_secret, 1, 1_0000_0000,
vec![nodes[1].node.get_our_node_id(), nodes[3].node.get_our_node_id()], &[&chan_1_3.0.contents],
&chanmon_cfgs[3].keys_manager
),
- blinded_payment_path(payment_secret,
+ blinded_payment_path(payment_secret, 1, 1_0000_0000,
vec![nodes[2].node.get_our_node_id(), nodes[3].node.get_our_node_id()], &[&chan_2_3.0.contents],
&chanmon_cfgs[3].keys_manager
),
_ => panic!()
}
}
+
+#[test]
+fn min_htlc() {
+	// The min htlc of a blinded path is the max (htlc_min - following_fees) along the path. Make sure
+	// the payment succeeds when we calculate the min htlc this way.
+	let chanmon_cfgs = create_chanmon_cfgs(4);
+	let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
+	// Give the two forwarding nodes distinct minimums and fees so the path-wide minimum is
+	// dominated by nodes[2]'s 5000 msat floor minus the fees charged after it.
+	let mut node_1_cfg = test_default_channel_config();
+	node_1_cfg.channel_handshake_config.our_htlc_minimum_msat = 2000;
+	node_1_cfg.channel_config.forwarding_fee_base_msat = 1000;
+	node_1_cfg.channel_config.forwarding_fee_proportional_millionths = 100_000;
+	let mut node_2_cfg = test_default_channel_config();
+	node_2_cfg.channel_handshake_config.our_htlc_minimum_msat = 5000;
+	node_2_cfg.channel_config.forwarding_fee_base_msat = 200;
+	node_2_cfg.channel_config.forwarding_fee_proportional_millionths = 150_000;
+	let mut node_3_cfg = test_default_channel_config();
+	node_3_cfg.channel_handshake_config.our_htlc_minimum_msat = 2000;
+	let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, Some(node_1_cfg), Some(node_2_cfg), Some(node_3_cfg)]);
+	let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
+	create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 0);
+	let chan_1_2 = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 1_000_000, 0);
+	let chan_2_3 = create_announced_chan_between_nodes_with_value(&nodes, 2, 3, 1_000_000, 0);
+
+	let min_htlc_msat = {
+		// The min htlc for this setup is nodes[2]'s htlc_minimum_msat minus the
+		// following fees.
+		let post_base_fee = chan_2_3.1.contents.htlc_minimum_msat - chan_2_3.0.contents.fee_base_msat as u64;
+		let prop_fee = chan_2_3.0.contents.fee_proportional_millionths as u64;
+		// Ceiling division: smallest amt such that amt plus its proportional fee still meets the
+		// downstream minimum (presumably mirrors the router's rounding — confirm against the
+		// blinded-path min-htlc aggregation logic).
+		(post_base_fee * 1_000_000 + 1_000_000 + prop_fee - 1) / (prop_fee + 1_000_000)
+	};
+	let (payment_preimage, payment_hash, payment_secret) = get_payment_preimage_hash(&nodes[3], Some(min_htlc_msat), None);
+	let mut route_params = get_blinded_route_parameters(
+		min_htlc_msat, payment_secret, chan_1_2.1.contents.htlc_minimum_msat,
+		chan_1_2.1.contents.htlc_maximum_msat, vec![nodes[1].node.get_our_node_id(),
+		nodes[2].node.get_our_node_id(), nodes[3].node.get_our_node_id()],
+		&[&chan_1_2.0.contents, &chan_2_3.0.contents], &chanmon_cfgs[3].keys_manager);
+	// Sanity check: the advertised blinded-hint minimum equals our hand-computed value.
+	assert_eq!(min_htlc_msat,
+		route_params.payment_params.payee.blinded_route_hints()[0].0.htlc_minimum_msat);
+
+	// Paying exactly the path minimum succeeds end-to-end.
+	nodes[0].node.send_payment(payment_hash, RecipientOnionFields::spontaneous_empty(), PaymentId(payment_hash.0), route_params.clone(), Retry::Attempts(0)).unwrap();
+	check_added_monitors(&nodes[0], 1);
+	pass_along_route(&nodes[0], &[&[&nodes[1], &nodes[2], &nodes[3]]], min_htlc_msat, payment_hash, payment_secret);
+	claim_payment(&nodes[0], &[&nodes[1], &nodes[2], &nodes[3]], payment_preimage);
+
+	// Paying 1 less than the min fails.
+	// Tick past the idempotency window so the same PaymentId can be reused for the second send.
+	for _ in 0..IDEMPOTENCY_TIMEOUT_TICKS + 1 {
+		nodes[0].node.timer_tick_occurred();
+	}
+	// Lower the hint's advertised minimum too, so the router will still attempt the underpaying route.
+	if let Payee::Blinded { ref mut route_hints, .. } = route_params.payment_params.payee {
+		route_hints[0].0.htlc_minimum_msat -= 1;
+	} else { panic!() }
+	route_params.final_value_msat -= 1;
+	nodes[0].node.send_payment(payment_hash, RecipientOnionFields::spontaneous_empty(), PaymentId(payment_hash.0), route_params, Retry::Attempts(0)).unwrap();
+	check_added_monitors(&nodes[0], 1);
+
+	let mut payment_event_0_1 = {
+		let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+		assert_eq!(events.len(), 1);
+		let ev = remove_first_msg_event_to_node(&nodes[1].node.get_our_node_id(), &mut events);
+		SendEvent::from_event(ev)
+	};
+	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event_0_1.msgs[0]);
+	check_added_monitors!(nodes[1], 0);
+	do_commitment_signed_dance(&nodes[1], &nodes[0], &payment_event_0_1.commitment_msg, true, true);
+	// The intro node rejects the underpaying HTLC with a blinded error back to the payer.
+	let mut updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+	nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
+	do_commitment_signed_dance(&nodes[0], &nodes[1], &updates.commitment_signed, false, false);
+	expect_payment_failed_conditions(&nodes[0], payment_hash, false,
+		PaymentFailedConditions::new().expected_htlc_error_data(INVALID_ONION_BLINDING, &[0; 32]));
+}
+
+#[test]
+fn conditionally_round_fwd_amt() {
+	// Previously, the (rng-found) feerates below caught a bug where an intermediate node would
+	// calculate an amt_to_forward that underpaid them by 1 msat, caused by rounding up the outbound
+	// amount on top of an already rounded-up total routing fee. Ensure that we'll conditionally round
+	// down intermediate nodes' outbound amounts based on whether rounding up will result in
+	// undercharging for relay.
+	let chanmon_cfgs = create_chanmon_cfgs(5);
+	let node_cfgs = create_node_cfgs(5, &chanmon_cfgs);
+
+	// Deliberately awkward fee rates (found by fuzzing/rng) that trigger the 1-msat rounding case.
+	let mut node_1_cfg = test_default_channel_config();
+	node_1_cfg.channel_config.forwarding_fee_base_msat = 247371;
+	node_1_cfg.channel_config.forwarding_fee_proportional_millionths = 86552;
+
+	let mut node_2_cfg = test_default_channel_config();
+	node_2_cfg.channel_config.forwarding_fee_base_msat = 198921;
+	node_2_cfg.channel_config.forwarding_fee_proportional_millionths = 681759;
+
+	let mut node_3_cfg = test_default_channel_config();
+	node_3_cfg.channel_config.forwarding_fee_base_msat = 132845;
+	node_3_cfg.channel_config.forwarding_fee_proportional_millionths = 552561;
+
+	let node_chanmgrs = create_node_chanmgrs(5, &node_cfgs, &[None, Some(node_1_cfg), Some(node_2_cfg), Some(node_3_cfg), None]);
+	let nodes = create_network(5, &node_cfgs, &node_chanmgrs);
+	create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 0);
+	let chan_1_2 = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 1_000_000, 0);
+	let chan_2_3 = create_announced_chan_between_nodes_with_value(&nodes, 2, 3, 1_000_000, 0);
+	let chan_3_4 = create_announced_chan_between_nodes_with_value(&nodes, 3, 4, 1_000_000, 0);
+
+	let amt_msat = 100_000;
+	let (payment_preimage, payment_hash, payment_secret) = get_payment_preimage_hash(&nodes[4], Some(amt_msat), None);
+	let mut route_params = get_blinded_route_parameters(amt_msat, payment_secret,
+		chan_1_2.1.contents.htlc_minimum_msat, chan_1_2.1.contents.htlc_maximum_msat,
+		vec![nodes[1].node.get_our_node_id(), nodes[2].node.get_our_node_id(),
+		nodes[3].node.get_our_node_id(), nodes[4].node.get_our_node_id()],
+		&[&chan_1_2.0.contents, &chan_2_3.0.contents, &chan_3_4.0.contents],
+		&chanmon_cfgs[4].keys_manager);
+	// Lift the fee cap — presumably the oversized fees above would otherwise exceed the default
+	// max_total_routing_fee and abort pathfinding (confirm against the default fee limit).
+	route_params.max_total_routing_fee_msat = None;
+
+	nodes[0].node.send_payment(payment_hash, RecipientOnionFields::spontaneous_empty(), PaymentId(payment_hash.0), route_params, Retry::Attempts(0)).unwrap();
+	check_added_monitors(&nodes[0], 1);
+	pass_along_route(&nodes[0], &[&[&nodes[1], &nodes[2], &nodes[3], &nodes[4]]], amt_msat, payment_hash, payment_secret);
+	nodes[4].node.claim_funds(payment_preimage);
+	let expected_path = &[&nodes[1], &nodes[2], &nodes[3], &nodes[4]];
+	let expected_route = &[&expected_path[..]];
+	// Rounding down may leave the recipient overpaid by at most 1 msat; accept that in the claim.
+	let mut args = ClaimAlongRouteArgs::new(&nodes[0], &expected_route[..], payment_preimage)
+		.allow_1_msat_fee_overpay();
+	let expected_fee = pass_claimed_payment_along_route(args);
+	expect_payment_sent(&nodes[0], payment_preimage, Some(Some(expected_fee)), true, true);
+}
+
+#[test]
+fn blinded_keysend() {
+	// Test that a spontaneous (keysend) payment over a route ending in a blinded path
+	// (nodes[1] intro -> nodes[2] recipient) can be sent, received, and claimed.
+	let mut mpp_keysend_config = test_default_channel_config();
+	mpp_keysend_config.accept_mpp_keysend = true;
+	let chanmon_cfgs = create_chanmon_cfgs(3);
+	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, Some(mpp_keysend_config)]);
+	let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+	create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 0);
+	let chan_upd_1_2 = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 1_000_000, 0).0.contents;
+
+	let amt_msat = 5000;
+	// Keysend: only a preimage is generated up front; the payment hash is derived when sending.
+	let (keysend_preimage, _, payment_secret) = get_payment_preimage_hash(&nodes[2], None, None);
+	let route_params = get_blinded_route_parameters(amt_msat, payment_secret, 1,
+		1_0000_0000,
+		nodes.iter().skip(1).map(|n| n.node.get_our_node_id()).collect(),
+		&[&chan_upd_1_2], &chanmon_cfgs[2].keys_manager);
+
+	let payment_hash = nodes[0].node.send_spontaneous_payment_with_retry(Some(keysend_preimage), RecipientOnionFields::spontaneous_empty(), PaymentId(keysend_preimage.0), route_params, Retry::Attempts(0)).unwrap();
+	check_added_monitors(&nodes[0], 1);
+
+	let expected_route: &[&[&Node]] = &[&[&nodes[1], &nodes[2]]];
+	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+	assert_eq!(events.len(), 1);
+
+	let ev = remove_first_msg_event_to_node(&nodes[1].node.get_our_node_id(), &mut events);
+	pass_along_path(&nodes[0], expected_route[0], amt_msat, payment_hash, Some(payment_secret), ev.clone(), true, Some(keysend_preimage));
+	claim_payment_along_route(&nodes[0], expected_route, false, keysend_preimage);
+}
+
+#[test]
+fn blinded_mpp_keysend() {
+ let mut mpp_keysend_config = test_default_channel_config();
+ mpp_keysend_config.accept_mpp_keysend = true;
+ let chanmon_cfgs = create_chanmon_cfgs(4);
+ let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, Some(mpp_keysend_config)]);
+ let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
+
+ create_announced_chan_between_nodes(&nodes, 0, 1);
+ create_announced_chan_between_nodes(&nodes, 0, 2);
+ let chan_1_3 = create_announced_chan_between_nodes(&nodes, 1, 3);
+ let chan_2_3 = create_announced_chan_between_nodes(&nodes, 2, 3);
+
+ let amt_msat = 15_000_000;
+ let (keysend_preimage, _, payment_secret) = get_payment_preimage_hash(&nodes[3], None, None);
+ let route_params = {
+ let pay_params = PaymentParameters::blinded(
+ vec![
+ blinded_payment_path(payment_secret, 1, 1_0000_0000,
+ vec![nodes[1].node.get_our_node_id(), nodes[3].node.get_our_node_id()], &[&chan_1_3.0.contents],
+ &chanmon_cfgs[3].keys_manager
+ ),
+ blinded_payment_path(payment_secret, 1, 1_0000_0000,
+ vec![nodes[2].node.get_our_node_id(), nodes[3].node.get_our_node_id()], &[&chan_2_3.0.contents],
+ &chanmon_cfgs[3].keys_manager
+ ),
+ ]
+ )
+ .with_bolt12_features(channelmanager::provided_bolt12_invoice_features(&UserConfig::default()))
+ .unwrap();
+ RouteParameters::from_payment_params_and_value(pay_params, amt_msat)
+ };
+
+ let payment_hash = nodes[0].node.send_spontaneous_payment_with_retry(Some(keysend_preimage), RecipientOnionFields::spontaneous_empty(), PaymentId(keysend_preimage.0), route_params, Retry::Attempts(0)).unwrap();
+ check_added_monitors!(nodes[0], 2);
+
+ let expected_route: &[&[&Node]] = &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]];
+ let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 2);
+
+ let ev = remove_first_msg_event_to_node(&nodes[1].node.get_our_node_id(), &mut events);
+ pass_along_path(&nodes[0], expected_route[0], amt_msat, payment_hash.clone(),
+ Some(payment_secret), ev.clone(), false, Some(keysend_preimage));
+
+ let ev = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut events);
+ pass_along_path(&nodes[0], expected_route[1], amt_msat, payment_hash.clone(),
+ Some(payment_secret), ev.clone(), true, Some(keysend_preimage));
+ claim_payment_along_route(&nodes[0], expected_route, false, keysend_preimage);
+}
+
+#[test]
+fn custom_tlvs_to_blinded_path() {
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+ let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+ let chan_upd = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 0).0.contents;
+
+ let amt_msat = 5000;
+ let (payment_preimage, payment_hash, payment_secret) = get_payment_preimage_hash(&nodes[1], Some(amt_msat), None);
+ let payee_tlvs = ReceiveTlvs {
+ payment_secret,
+ payment_constraints: PaymentConstraints {
+ max_cltv_expiry: u32::max_value(),
+ htlc_minimum_msat: chan_upd.htlc_minimum_msat,
+ },
+ payment_context: PaymentContext::unknown(),
+ };
+ let mut secp_ctx = Secp256k1::new();
+ let blinded_path = BlindedPath::one_hop_for_payment(
+ nodes[1].node.get_our_node_id(), payee_tlvs, TEST_FINAL_CLTV as u16,
+ &chanmon_cfgs[1].keys_manager, &secp_ctx
+ ).unwrap();
+
+ let route_params = RouteParameters::from_payment_params_and_value(
+ PaymentParameters::blinded(vec![blinded_path]),
+ amt_msat,
+ );
+
+ let recipient_onion_fields = RecipientOnionFields::spontaneous_empty()
+ .with_custom_tlvs(vec![((1 << 16) + 1, vec![42, 42])])
+ .unwrap();
+ nodes[0].node.send_payment(payment_hash, recipient_onion_fields.clone(),
+ PaymentId(payment_hash.0), route_params, Retry::Attempts(0)).unwrap();
+ check_added_monitors(&nodes[0], 1);
+
+ let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ let ev = remove_first_msg_event_to_node(&nodes[1].node.get_our_node_id(), &mut events);
+
+ let path = &[&nodes[1]];
+ let args = PassAlongPathArgs::new(&nodes[0], path, amt_msat, payment_hash, ev)
+ .with_payment_secret(payment_secret)
+ .with_custom_tlvs(recipient_onion_fields.custom_tlvs.clone());
+ do_pass_along_path(args);
+ claim_payment(&nodes[0], &[&nodes[1]], payment_preimage);
+}
use bitcoin::PublicKey as BitcoinPublicKey;
use crate::io;
-use crate::prelude::*;
use core::cmp;
use crate::ln::chan_utils;
use crate::util::transaction_utils::sort_outputs;
use crate::crypto::utils::{sign, sign_with_aux_rand};
use super::channel_keys::{DelayedPaymentBasepoint, DelayedPaymentKey, HtlcKey, HtlcBasepoint, RevocationKey, RevocationBasepoint};
+#[allow(unused_imports)]
+use crate::prelude::*;
+
/// Maximum number of one-way in-flight HTLC (protocol-level value).
pub const MAX_HTLCS: u16 = 483;
/// The weight of a BIP141 witnessScript for a BOLT3's "offered HTLC output" on a commitment transaction, non-anchor variant.
self.counterparty_parameters.is_some() && self.funding_outpoint.is_some()
}
+ /// Whether the channel supports zero-fee HTLC transaction anchors.
+ pub(crate) fn supports_anchors(&self) -> bool {
+ self.channel_type_features.supports_anchors_zero_fee_htlc_tx()
+ }
+
/// Convert the holder/counterparty parameters to broadcaster/countersignatory-organized parameters,
/// given that the holder is the broadcaster.
///
mod tests {
use super::{CounterpartyCommitmentSecrets, ChannelPublicKeys};
use crate::chain;
- use crate::prelude::*;
use crate::ln::chan_utils::{get_htlc_redeemscript, get_to_countersignatory_with_anchors_redeemscript, CommitmentTransaction, TxCreationKeys, ChannelTransactionParameters, CounterpartyChannelTransactionParameters, HTLCOutputInCommitment};
use bitcoin::secp256k1::{PublicKey, SecretKey, Secp256k1};
use crate::util::test_utils;
use bitcoin::PublicKey as BitcoinPublicKey;
use crate::ln::features::ChannelTypeFeatures;
+ #[allow(unused_imports)]
+ use crate::prelude::*;
+
struct TestCommitmentTxBuilder {
commitment_number: u64,
holder_funding_pubkey: PublicKey,
use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose, ClosureReason, HTLCDestination};
use crate::ln::channelmanager::{RAACommitmentOrder, PaymentSendFailure, PaymentId, RecipientOnionFields};
use crate::ln::channel::{AnnouncementSigsState, ChannelPhase};
-use crate::ln::msgs;
+use crate::ln::{msgs, ChannelId};
use crate::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler};
use crate::util::test_channel_signer::TestChannelSigner;
use crate::util::errors::APIError;
assert_eq!(receiver_node_id.unwrap(), nodes[1].node.get_our_node_id());
assert_eq!(via_channel_id, Some(channel_id));
match &purpose {
- PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
+ PaymentPurpose::Bolt11InvoicePayment { payment_preimage, payment_secret, .. } => {
assert!(payment_preimage.is_none());
assert_eq!(payment_secret_1, *payment_secret);
},
- _ => panic!("expected PaymentPurpose::InvoicePayment")
+ _ => panic!("expected PaymentPurpose::Bolt11InvoicePayment")
}
},
_ => panic!("Unexpected event"),
assert_eq!(receiver_node_id.unwrap(), nodes[1].node.get_our_node_id());
assert_eq!(via_channel_id, Some(channel_id));
match &purpose {
- PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
+ PaymentPurpose::Bolt11InvoicePayment { payment_preimage, payment_secret, .. } => {
assert!(payment_preimage.is_none());
assert_eq!(payment_secret_2, *payment_secret);
},
- _ => panic!("expected PaymentPurpose::InvoicePayment")
+ _ => panic!("expected PaymentPurpose::Bolt11InvoicePayment")
}
},
_ => panic!("Unexpected event"),
assert_eq!(receiver_node_id.unwrap(), nodes[1].node.get_our_node_id());
assert_eq!(via_channel_id, Some(channel_id));
match &purpose {
- PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
+ PaymentPurpose::Bolt11InvoicePayment { payment_preimage, payment_secret, .. } => {
assert!(payment_preimage.is_none());
assert_eq!(our_payment_secret, *payment_secret);
},
- _ => panic!("expected PaymentPurpose::InvoicePayment")
+ _ => panic!("expected PaymentPurpose::Bolt11InvoicePayment")
}
},
_ => panic!("Unexpected event"),
assert_eq!(via_channel_id, Some(channel_id));
assert_eq!(via_user_channel_id, Some(42));
match &purpose {
- PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
+ PaymentPurpose::Bolt11InvoicePayment { payment_preimage, payment_secret, .. } => {
assert!(payment_preimage.is_none());
assert_eq!(payment_secret_2, *payment_secret);
},
- _ => panic!("expected PaymentPurpose::InvoicePayment")
+ _ => panic!("expected PaymentPurpose::Bolt11InvoicePayment")
}
},
_ => panic!("Unexpected event"),
assert_eq!(receiver_node_id.unwrap(), nodes[0].node.get_our_node_id());
assert_eq!(via_channel_id, Some(channel_id));
match &purpose {
- PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
+ PaymentPurpose::Bolt11InvoicePayment { payment_preimage, payment_secret, .. } => {
assert!(payment_preimage.is_none());
assert_eq!(payment_secret_3, *payment_secret);
},
- _ => panic!("expected PaymentPurpose::InvoicePayment")
+ _ => panic!("expected PaymentPurpose::Bolt11InvoicePayment")
}
},
_ => panic!("Unexpected event"),
chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
- let channel_id = OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index }.to_channel_id();
+ let channel_id = ChannelId::v1_from_funding_outpoint(OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index });
nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg);
check_added_monitors!(nodes[1], 1);
send_payment(&nodes[0], &[&nodes[1]], 8000000);
close_channel(&nodes[0], &nodes[1], &channel_id, funding_tx, true);
- check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
- check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
}
#[test]
assert_eq!(txn_a, txn_b);
assert_eq!(txn_a.len(), 1);
check_spends!(txn_a[0], funding_tx);
- check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
- check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[0], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
}
#[test]
let bc_update_id = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id_bc).unwrap().2;
let mut events = nodes[1].node.get_and_clear_pending_events();
assert_eq!(events.len(), if close_during_reload { 2 } else { 1 });
- expect_payment_forwarded(events.pop().unwrap(), &nodes[1], &nodes[0], &nodes[2], Some(1000), close_during_reload, false);
+ expect_payment_forwarded(events.pop().unwrap(), &nodes[1], &nodes[0], &nodes[2], Some(1000),
+ None, close_during_reload, false, false);
if close_during_reload {
match events[0] {
Event::ChannelClosed { .. } => {},
use crate::io;
use crate::prelude::*;
use core::{cmp,mem,fmt};
-use core::convert::TryInto;
use core::ops::Deref;
#[cfg(any(test, fuzzing, debug_assertions))]
use crate::sync::Mutex;
Fulfill(PaymentPreimage),
}
+/// Represents the resolution status of an inbound HTLC.
+#[derive(Clone)]
+enum InboundHTLCResolution {
+ /// Resolved implies the action we must take with the inbound HTLC has already been determined,
+ /// i.e., we already know whether it must be failed back or forwarded.
+ //
+ // TODO: Once this variant is removed, we should also clean up
+ // [`MonitorRestoreUpdates::accepted_htlcs`] as the path will be unreachable.
+ Resolved {
+ pending_htlc_status: PendingHTLCStatus,
+ },
+ /// Pending implies we will attempt to resolve the inbound HTLC once it has been fully committed
+ /// to by both sides of the channel, i.e., once a `revoke_and_ack` has been processed by both
+ /// nodes for the state update in which it was proposed.
+ Pending {
+ update_add_htlc: msgs::UpdateAddHTLC,
+ },
+}
+
+impl_writeable_tlv_based_enum!(InboundHTLCResolution,
+ (0, Resolved) => {
+ (0, pending_htlc_status, required),
+ },
+ (2, Pending) => {
+ (0, update_add_htlc, required),
+ };
+);
+
enum InboundHTLCState {
/// Offered by remote, to be included in next local commitment tx. I.e., the remote sent an
/// update_add_htlc message for this HTLC.
- RemoteAnnounced(PendingHTLCStatus),
+ RemoteAnnounced(InboundHTLCResolution),
/// Included in a received commitment_signed message (implying we've
/// revoke_and_ack'd it), but the remote hasn't yet revoked their previous
/// state (see the example below). We have not yet included this HTLC in a
/// Implies AwaitingRemoteRevoke.
///
/// [BOLT #2]: https://github.com/lightning/bolts/blob/master/02-peer-protocol.md
- AwaitingRemoteRevokeToAnnounce(PendingHTLCStatus),
+ AwaitingRemoteRevokeToAnnounce(InboundHTLCResolution),
/// Included in a received commitment_signed message (implying we've revoke_and_ack'd it).
/// We have also included this HTLC in our latest commitment_signed and are now just waiting
/// on the remote's revoke_and_ack to make this HTLC an irrevocable part of the state of the
/// channel (before it can then get forwarded and/or removed).
/// Implies AwaitingRemoteRevoke.
- AwaitingAnnouncedRemoteRevoke(PendingHTLCStatus),
+ AwaitingAnnouncedRemoteRevoke(InboundHTLCResolution),
Committed,
/// Removed by us and a new commitment_signed was sent (if we were AwaitingRemoteRevoke when we
/// created it we would have put it in the holding cell instead). When they next revoke_and_ack
LocalRemoved(InboundHTLCRemovalReason),
}
+/// Exposes the state of pending inbound HTLCs.
+///
+/// At a high level, an HTLC being forwarded from one Lightning node to another Lightning node goes
+/// through the following states in the state machine:
+/// - Announced for addition by the originating node through the update_add_htlc message.
+/// - Added to the commitment transaction of the receiving node and originating node in turn
+/// through the exchange of commitment_signed and revoke_and_ack messages.
+ /// - Announced for resolution (fulfillment or failure) by the receiving node through one of
+ /// the update_fulfill_htlc, update_fail_htlc, or update_fail_malformed_htlc messages.
+/// - Removed from the commitment transaction of the originating node and receiving node in turn
+/// through the exchange of commitment_signed and revoke_and_ack messages.
+///
+/// This can be used to inspect what next message an HTLC is waiting for to advance its state.
+#[derive(Clone, Debug, PartialEq)]
+pub enum InboundHTLCStateDetails {
+ /// We have added this HTLC in our commitment transaction by receiving commitment_signed and
+ /// returning revoke_and_ack. We are awaiting the appropriate revoke_and_ack's from the remote
+ /// before this HTLC is included on the remote commitment transaction.
+ AwaitingRemoteRevokeToAdd,
+ /// This HTLC has been included in the commitment_signed and revoke_and_ack messages on both sides
+ /// and is included in both commitment transactions.
+ ///
+ /// This HTLC is now safe to either forward or be claimed as a payment by us. The HTLC will
+ /// remain in this state until the forwarded upstream HTLC has been resolved and we resolve this
+ /// HTLC correspondingly, or until we claim it as a payment. If it is part of a multipart
+ /// payment, it will only be claimed together with other required parts.
+ Committed,
+ /// We have received the preimage for this HTLC and it is being removed by fulfilling it with
+ /// update_fulfill_htlc. This HTLC is still on both commitment transactions, but we are awaiting
+ /// the appropriate revoke_and_ack's from the remote before this HTLC is removed from the remote
+ /// commitment transaction after update_fulfill_htlc.
+ AwaitingRemoteRevokeToRemoveFulfill,
+ /// The HTLC is being removed by failing it with update_fail_htlc or update_fail_malformed_htlc.
+ /// This HTLC is still on both commitment transactions, but we are awaiting the appropriate
+ /// revoke_and_ack's from the remote before this HTLC is removed from the remote commitment
+ /// transaction.
+ AwaitingRemoteRevokeToRemoveFail,
+}
+
+impl From<&InboundHTLCState> for Option<InboundHTLCStateDetails> {
+ fn from(state: &InboundHTLCState) -> Option<InboundHTLCStateDetails> {
+ match state {
+ InboundHTLCState::RemoteAnnounced(_) => None,
+ InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) =>
+ Some(InboundHTLCStateDetails::AwaitingRemoteRevokeToAdd),
+ InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) =>
+ Some(InboundHTLCStateDetails::AwaitingRemoteRevokeToAdd),
+ InboundHTLCState::Committed =>
+ Some(InboundHTLCStateDetails::Committed),
+ InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(_)) =>
+ Some(InboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFail),
+ InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailMalformed(_)) =>
+ Some(InboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFail),
+ InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(_)) =>
+ Some(InboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFulfill),
+ }
+ }
+}
+
+impl_writeable_tlv_based_enum_upgradable!(InboundHTLCStateDetails,
+ (0, AwaitingRemoteRevokeToAdd) => {},
+ (2, Committed) => {},
+ (4, AwaitingRemoteRevokeToRemoveFulfill) => {},
+ (6, AwaitingRemoteRevokeToRemoveFail) => {};
+);
+
struct InboundHTLCOutput {
htlc_id: u64,
amount_msat: u64,
state: InboundHTLCState,
}
+/// Exposes details around pending inbound HTLCs.
+#[derive(Clone, Debug, PartialEq)]
+pub struct InboundHTLCDetails {
+ /// The HTLC ID.
+ /// The IDs are incremented by 1 starting from 0 for each offered HTLC.
+ /// They are unique per channel and inbound/outbound direction, unless an HTLC was only announced
+ /// and not part of any commitment transaction.
+ pub htlc_id: u64,
+ /// The amount in msat.
+ pub amount_msat: u64,
+ /// The block height at which this HTLC expires.
+ pub cltv_expiry: u32,
+ /// The payment hash.
+ pub payment_hash: PaymentHash,
+ /// The state of the HTLC in the state machine.
+ ///
+ /// Determines on which commitment transactions the HTLC is included and what message the HTLC is
+ /// waiting for to advance to the next state.
+ ///
+ /// See [`InboundHTLCStateDetails`] for information on the specific states.
+ ///
+ /// LDK will always fill this field in, but when downgrading to prior versions of LDK, new
+ /// states may result in `None` here.
+ pub state: Option<InboundHTLCStateDetails>,
+ /// Whether the HTLC has an output below the local dust limit. If so, the output will be trimmed
+ /// from the local commitment transaction and added to the commitment transaction fee.
+ /// For non-anchor channels, this takes into account the cost of the second-stage HTLC
+ /// transactions as well.
+ ///
+ /// When the local commitment transaction is broadcast as part of a unilateral closure,
+ /// the value of this HTLC will therefore not be claimable but instead burned as a transaction
+ /// fee.
+ ///
+ /// Note that dust limits are specific to each party. An HTLC can be dust for the local
+ /// commitment transaction but not for the counterparty's commitment transaction and vice versa.
+ pub is_dust: bool,
+}
+
+impl_writeable_tlv_based!(InboundHTLCDetails, {
+ (0, htlc_id, required),
+ (2, amount_msat, required),
+ (4, cltv_expiry, required),
+ (6, payment_hash, required),
+ (7, state, upgradable_option),
+ (8, is_dust, required),
+});
+
#[cfg_attr(test, derive(Clone, Debug, PartialEq))]
enum OutboundHTLCState {
/// Added by us and included in a commitment_signed (if we were AwaitingRemoteRevoke when we
AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome),
}
+/// Exposes the state of pending outbound HTLCs.
+///
+/// At a high level, an HTLC being forwarded from one Lightning node to another Lightning node goes
+/// through the following states in the state machine:
+/// - Announced for addition by the originating node through the update_add_htlc message.
+/// - Added to the commitment transaction of the receiving node and originating node in turn
+/// through the exchange of commitment_signed and revoke_and_ack messages.
+ /// - Announced for resolution (fulfillment or failure) by the receiving node through one of
+ /// the update_fulfill_htlc, update_fail_htlc, or update_fail_malformed_htlc messages.
+/// - Removed from the commitment transaction of the originating node and receiving node in turn
+/// through the exchange of commitment_signed and revoke_and_ack messages.
+///
+/// This can be used to inspect what next message an HTLC is waiting for to advance its state.
+#[derive(Clone, Debug, PartialEq)]
+pub enum OutboundHTLCStateDetails {
+ /// We are awaiting the appropriate revoke_and_ack's from the remote before the HTLC is added
+ /// on the remote's commitment transaction after update_add_htlc.
+ AwaitingRemoteRevokeToAdd,
+ /// The HTLC has been added to the remote's commitment transaction by sending commitment_signed
+ /// and receiving revoke_and_ack in return.
+ ///
+ /// The HTLC will remain in this state until the remote node resolves the HTLC, or until we
+ /// unilaterally close the channel due to a timeout with an uncooperative remote node.
+ Committed,
+ /// The HTLC has been fulfilled successfully by the remote with a preimage in update_fulfill_htlc,
+ /// and we removed the HTLC from our commitment transaction by receiving commitment_signed and
+ /// returning revoke_and_ack. We are awaiting the appropriate revoke_and_ack's from the remote
+ /// for the removal from its commitment transaction.
+ AwaitingRemoteRevokeToRemoveSuccess,
+ /// The HTLC has been failed by the remote with update_fail_htlc or update_fail_malformed_htlc,
+ /// and we removed the HTLC from our commitment transaction by receiving commitment_signed and
+ /// returning revoke_and_ack. We are awaiting the appropriate revoke_and_ack's from the remote
+ /// for the removal from its commitment transaction.
+ AwaitingRemoteRevokeToRemoveFailure,
+}
+
+impl From<&OutboundHTLCState> for OutboundHTLCStateDetails {
+ fn from(state: &OutboundHTLCState) -> OutboundHTLCStateDetails {
+ match state {
+ OutboundHTLCState::LocalAnnounced(_) =>
+ OutboundHTLCStateDetails::AwaitingRemoteRevokeToAdd,
+ OutboundHTLCState::Committed =>
+ OutboundHTLCStateDetails::Committed,
+ // RemoteRemoved states are ignored as the state is transient and the remote has not committed to
+ // the state yet.
+ OutboundHTLCState::RemoteRemoved(_) =>
+ OutboundHTLCStateDetails::Committed,
+ OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_)) =>
+ OutboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveSuccess,
+ OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Failure(_)) =>
+ OutboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFailure,
+ OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) =>
+ OutboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveSuccess,
+ OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Failure(_)) =>
+ OutboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFailure,
+ }
+ }
+}
+
+impl_writeable_tlv_based_enum_upgradable!(OutboundHTLCStateDetails,
+ (0, AwaitingRemoteRevokeToAdd) => {},
+ (2, Committed) => {},
+ (4, AwaitingRemoteRevokeToRemoveSuccess) => {},
+ (6, AwaitingRemoteRevokeToRemoveFailure) => {};
+);
+
#[derive(Clone)]
#[cfg_attr(test, derive(Debug, PartialEq))]
enum OutboundHTLCOutcome {
skimmed_fee_msat: Option<u64>,
}
+/// Exposes details around pending outbound HTLCs.
+#[derive(Clone, Debug, PartialEq)]
+pub struct OutboundHTLCDetails {
+ /// The HTLC ID.
+ /// The IDs are incremented by 1 starting from 0 for each offered HTLC.
+ /// They are unique per channel and inbound/outbound direction, unless an HTLC was only announced
+ /// and not part of any commitment transaction.
+ ///
+ /// Not present when we are awaiting a remote revocation and the HTLC is not added yet.
+ pub htlc_id: Option<u64>,
+ /// The amount in msat.
+ pub amount_msat: u64,
+ /// The block height at which this HTLC expires.
+ pub cltv_expiry: u32,
+ /// The payment hash.
+ pub payment_hash: PaymentHash,
+ /// The state of the HTLC in the state machine.
+ ///
+ /// Determines on which commitment transactions the HTLC is included and what message the HTLC is
+ /// waiting for to advance to the next state.
+ ///
+ /// See [`OutboundHTLCStateDetails`] for information on the specific states.
+ ///
+ /// LDK will always fill this field in, but when downgrading to prior versions of LDK, new
+ /// states may result in `None` here.
+ pub state: Option<OutboundHTLCStateDetails>,
+ /// The extra fee being skimmed off the top of this HTLC.
+ pub skimmed_fee_msat: Option<u64>,
+ /// Whether the HTLC has an output below the local dust limit. If so, the output will be trimmed
+ /// from the local commitment transaction and added to the commitment transaction fee.
+ /// For non-anchor channels, this takes into account the cost of the second-stage HTLC
+ /// transactions as well.
+ ///
+ /// When the local commitment transaction is broadcast as part of a unilateral closure,
+ /// the value of this HTLC will therefore not be claimable but instead burned as a transaction
+ /// fee.
+ ///
+ /// Note that dust limits are specific to each party. An HTLC can be dust for the local
+ /// commitment transaction but not for the counterparty's commitment transaction and vice versa.
+ pub is_dust: bool,
+}
+
+impl_writeable_tlv_based!(OutboundHTLCDetails, {
+ (0, htlc_id, required),
+ (2, amount_msat, required),
+ (4, cltv_expiry, required),
+ (6, payment_hash, required),
+ (7, state, upgradable_option),
+ (8, skimmed_fee_msat, required),
+ (10, is_dust, required),
+});
+
/// See AwaitingRemoteRevoke ChannelState for more info
#[cfg_attr(test, derive(Clone, Debug, PartialEq))]
enum HTLCUpdateAwaitingACK {
}
macro_rules! define_state_flags {
- ($flag_type_doc: expr, $flag_type: ident, [$(($flag_doc: expr, $flag: ident, $value: expr)),+], $extra_flags: expr) => {
+ ($flag_type_doc: expr, $flag_type: ident, [$(($flag_doc: expr, $flag: ident, $value: expr, $get: ident, $set: ident, $clear: ident)),+], $extra_flags: expr) => {
#[doc = $flag_type_doc]
#[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq)]
struct $flag_type(u32);
#[allow(unused)]
fn is_empty(&self) -> bool { self.0 == 0 }
-
#[allow(unused)]
fn is_set(&self, flag: Self) -> bool { *self & flag == flag }
+ #[allow(unused)]
+ fn set(&mut self, flag: Self) { *self |= flag }
+ #[allow(unused)]
+ fn clear(&mut self, flag: Self) -> Self { self.0 &= !flag.0; *self }
}
- impl core::ops::Not for $flag_type {
- type Output = Self;
- fn not(self) -> Self::Output { Self(!self.0) }
- }
+ $(
+ define_state_flags!($flag_type, Self::$flag, $get, $set, $clear);
+ )*
+
impl core::ops::BitOr for $flag_type {
type Output = Self;
fn bitor(self, rhs: Self) -> Self::Output { Self(self.0 | rhs.0) }
($flag_type_doc: expr, $flag_type: ident, $flags: tt) => {
define_state_flags!($flag_type_doc, $flag_type, $flags, 0);
};
+ ($flag_type: ident, $flag: expr, $get: ident, $set: ident, $clear: ident) => {
+ impl $flag_type {
+ #[allow(unused)]
+ fn $get(&self) -> bool { self.is_set($flag_type::new() | $flag) }
+ #[allow(unused)]
+ fn $set(&mut self) { self.set($flag_type::new() | $flag) }
+ #[allow(unused)]
+ fn $clear(&mut self) -> Self { self.clear($flag_type::new() | $flag) }
+ }
+ };
($flag_type_doc: expr, FUNDED_STATE, $flag_type: ident, $flags: tt) => {
define_state_flags!($flag_type_doc, $flag_type, $flags, FundedStateFlags::ALL.0);
+
+ define_state_flags!($flag_type, FundedStateFlags::PEER_DISCONNECTED,
+ is_peer_disconnected, set_peer_disconnected, clear_peer_disconnected);
+ define_state_flags!($flag_type, FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS,
+ is_monitor_update_in_progress, set_monitor_update_in_progress, clear_monitor_update_in_progress);
+ define_state_flags!($flag_type, FundedStateFlags::REMOTE_SHUTDOWN_SENT,
+ is_remote_shutdown_sent, set_remote_shutdown_sent, clear_remote_shutdown_sent);
+ define_state_flags!($flag_type, FundedStateFlags::LOCAL_SHUTDOWN_SENT,
+ is_local_shutdown_sent, set_local_shutdown_sent, clear_local_shutdown_sent);
+
impl core::ops::BitOr<FundedStateFlags> for $flag_type {
type Output = Self;
fn bitor(self, rhs: FundedStateFlags) -> Self::Output { Self(self.0 | rhs.0) }
"Flags that apply to all [`ChannelState`] variants in which the channel is funded.",
FundedStateFlags, [
("Indicates the remote side is considered \"disconnected\" and no updates are allowed \
- until after we've done a `channel_reestablish` dance.", PEER_DISCONNECTED, state_flags::PEER_DISCONNECTED),
+ until after we've done a `channel_reestablish` dance.", PEER_DISCONNECTED, state_flags::PEER_DISCONNECTED,
+ is_peer_disconnected, set_peer_disconnected, clear_peer_disconnected),
("Indicates the user has told us a `ChannelMonitor` update is pending async persistence \
somewhere and we should pause sending any outbound messages until they've managed to \
- complete it.", MONITOR_UPDATE_IN_PROGRESS, state_flags::MONITOR_UPDATE_IN_PROGRESS),
+ complete it.", MONITOR_UPDATE_IN_PROGRESS, state_flags::MONITOR_UPDATE_IN_PROGRESS,
+ is_monitor_update_in_progress, set_monitor_update_in_progress, clear_monitor_update_in_progress),
("Indicates we received a `shutdown` message from the remote end. If set, they may not add \
any new HTLCs to the channel, and we are expected to respond with our own `shutdown` \
- message when possible.", REMOTE_SHUTDOWN_SENT, state_flags::REMOTE_SHUTDOWN_SENT),
+ message when possible.", REMOTE_SHUTDOWN_SENT, state_flags::REMOTE_SHUTDOWN_SENT,
+ is_remote_shutdown_sent, set_remote_shutdown_sent, clear_remote_shutdown_sent),
("Indicates we sent a `shutdown` message. At this point, we may not add any new HTLCs to \
- the channel.", LOCAL_SHUTDOWN_SENT, state_flags::LOCAL_SHUTDOWN_SENT)
+ the channel.", LOCAL_SHUTDOWN_SENT, state_flags::LOCAL_SHUTDOWN_SENT,
+ is_local_shutdown_sent, set_local_shutdown_sent, clear_local_shutdown_sent)
]
);
"Flags that only apply to [`ChannelState::NegotiatingFunding`].",
NegotiatingFundingFlags, [
("Indicates we have (or are prepared to) send our `open_channel`/`accept_channel` message.",
- OUR_INIT_SENT, state_flags::OUR_INIT_SENT),
+ OUR_INIT_SENT, state_flags::OUR_INIT_SENT, is_our_init_sent, set_our_init_sent, clear_our_init_sent),
("Indicates we have received their `open_channel`/`accept_channel` message.",
- THEIR_INIT_SENT, state_flags::THEIR_INIT_SENT)
+ THEIR_INIT_SENT, state_flags::THEIR_INIT_SENT, is_their_init_sent, set_their_init_sent, clear_their_init_sent)
]
);
FUNDED_STATE, AwaitingChannelReadyFlags, [
("Indicates they sent us a `channel_ready` message. Once both `THEIR_CHANNEL_READY` and \
`OUR_CHANNEL_READY` are set, our state moves on to `ChannelReady`.",
- THEIR_CHANNEL_READY, state_flags::THEIR_CHANNEL_READY),
+ THEIR_CHANNEL_READY, state_flags::THEIR_CHANNEL_READY,
+ is_their_channel_ready, set_their_channel_ready, clear_their_channel_ready),
("Indicates we sent them a `channel_ready` message. Once both `THEIR_CHANNEL_READY` and \
`OUR_CHANNEL_READY` are set, our state moves on to `ChannelReady`.",
- OUR_CHANNEL_READY, state_flags::OUR_CHANNEL_READY),
+ OUR_CHANNEL_READY, state_flags::OUR_CHANNEL_READY,
+ is_our_channel_ready, set_our_channel_ready, clear_our_channel_ready),
("Indicates the channel was funded in a batch and the broadcast of the funding transaction \
is being held until all channels in the batch have received `funding_signed` and have \
- their monitors persisted.", WAITING_FOR_BATCH, state_flags::WAITING_FOR_BATCH)
+ their monitors persisted.", WAITING_FOR_BATCH, state_flags::WAITING_FOR_BATCH,
+ is_waiting_for_batch, set_waiting_for_batch, clear_waiting_for_batch)
]
);
`revoke_and_ack` message. During this period, we can't generate new `commitment_signed` \
messages as we'd be unable to determine which HTLCs they included in their `revoke_and_ack` \
implicit ACK, so instead we have to hold them away temporarily to be sent later.",
- AWAITING_REMOTE_REVOKE, state_flags::AWAITING_REMOTE_REVOKE)
+ AWAITING_REMOTE_REVOKE, state_flags::AWAITING_REMOTE_REVOKE,
+ is_awaiting_remote_revoke, set_awaiting_remote_revoke, clear_awaiting_remote_revoke)
]
);
+// Note that the order of this enum is implicitly defined by where each variant is placed. Take this
+// into account when introducing new states and update `test_channel_state_order` accordingly.
#[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq)]
enum ChannelState {
/// We are negotiating the parameters required for the channel prior to funding it.
}
macro_rules! impl_state_flag {
- ($get: ident, $set: ident, $clear: ident, $state_flag: expr, [$($state: ident),+]) => {
+ ($get: ident, $set: ident, $clear: ident, [$($state: ident),+]) => {
#[allow(unused)]
fn $get(&self) -> bool {
match self {
$(
- ChannelState::$state(flags) => flags.is_set($state_flag.into()),
+ ChannelState::$state(flags) => flags.$get(),
)*
_ => false,
}
fn $set(&mut self) {
match self {
$(
- ChannelState::$state(flags) => *flags |= $state_flag,
+ ChannelState::$state(flags) => flags.$set(),
)*
_ => debug_assert!(false, "Attempted to set flag on unexpected ChannelState"),
}
fn $clear(&mut self) {
match self {
$(
- ChannelState::$state(flags) => *flags &= !($state_flag),
+ ChannelState::$state(flags) => { let _ = flags.$clear(); },
)*
_ => debug_assert!(false, "Attempted to clear flag on unexpected ChannelState"),
}
}
};
- ($get: ident, $set: ident, $clear: ident, $state_flag: expr, FUNDED_STATES) => {
- impl_state_flag!($get, $set, $clear, $state_flag, [AwaitingChannelReady, ChannelReady]);
+ ($get: ident, $set: ident, $clear: ident, FUNDED_STATES) => {
+ impl_state_flag!($get, $set, $clear, [AwaitingChannelReady, ChannelReady]);
};
- ($get: ident, $set: ident, $clear: ident, $state_flag: expr, $state: ident) => {
- impl_state_flag!($get, $set, $clear, $state_flag, [$state]);
+ ($get: ident, $set: ident, $clear: ident, $state: ident) => {
+ impl_state_flag!($get, $set, $clear, [$state]);
};
}
}
}
- fn should_force_holding_cell(&self) -> bool {
+ fn can_generate_new_commitment(&self) -> bool {
match self {
ChannelState::ChannelReady(flags) =>
- flags.is_set(ChannelReadyFlags::AWAITING_REMOTE_REVOKE) ||
- flags.is_set(FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS.into()) ||
- flags.is_set(FundedStateFlags::PEER_DISCONNECTED.into()),
+ !flags.is_set(ChannelReadyFlags::AWAITING_REMOTE_REVOKE) &&
+ !flags.is_set(FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS.into()) &&
+ !flags.is_set(FundedStateFlags::PEER_DISCONNECTED.into()),
_ => {
- debug_assert!(false, "The holding cell is only valid within ChannelReady");
+ debug_assert!(false, "Can only generate new commitment within ChannelReady");
false
},
}
}
- impl_state_flag!(is_peer_disconnected, set_peer_disconnected, clear_peer_disconnected,
- FundedStateFlags::PEER_DISCONNECTED, FUNDED_STATES);
- impl_state_flag!(is_monitor_update_in_progress, set_monitor_update_in_progress, clear_monitor_update_in_progress,
- FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS, FUNDED_STATES);
- impl_state_flag!(is_local_shutdown_sent, set_local_shutdown_sent, clear_local_shutdown_sent,
- FundedStateFlags::LOCAL_SHUTDOWN_SENT, FUNDED_STATES);
- impl_state_flag!(is_remote_shutdown_sent, set_remote_shutdown_sent, clear_remote_shutdown_sent,
- FundedStateFlags::REMOTE_SHUTDOWN_SENT, FUNDED_STATES);
- impl_state_flag!(is_our_channel_ready, set_our_channel_ready, clear_our_channel_ready,
- AwaitingChannelReadyFlags::OUR_CHANNEL_READY, AwaitingChannelReady);
- impl_state_flag!(is_their_channel_ready, set_their_channel_ready, clear_their_channel_ready,
- AwaitingChannelReadyFlags::THEIR_CHANNEL_READY, AwaitingChannelReady);
- impl_state_flag!(is_waiting_for_batch, set_waiting_for_batch, clear_waiting_for_batch,
- AwaitingChannelReadyFlags::WAITING_FOR_BATCH, AwaitingChannelReady);
- impl_state_flag!(is_awaiting_remote_revoke, set_awaiting_remote_revoke, clear_awaiting_remote_revoke,
- ChannelReadyFlags::AWAITING_REMOTE_REVOKE, ChannelReady);
+ impl_state_flag!(is_peer_disconnected, set_peer_disconnected, clear_peer_disconnected, FUNDED_STATES);
+ impl_state_flag!(is_monitor_update_in_progress, set_monitor_update_in_progress, clear_monitor_update_in_progress, FUNDED_STATES);
+ impl_state_flag!(is_local_shutdown_sent, set_local_shutdown_sent, clear_local_shutdown_sent, FUNDED_STATES);
+ impl_state_flag!(is_remote_shutdown_sent, set_remote_shutdown_sent, clear_remote_shutdown_sent, FUNDED_STATES);
+ impl_state_flag!(is_our_channel_ready, set_our_channel_ready, clear_our_channel_ready, AwaitingChannelReady);
+ impl_state_flag!(is_their_channel_ready, set_their_channel_ready, clear_their_channel_ready, AwaitingChannelReady);
+ impl_state_flag!(is_waiting_for_batch, set_waiting_for_batch, clear_waiting_for_batch, AwaitingChannelReady);
+ impl_state_flag!(is_awaiting_remote_revoke, set_awaiting_remote_revoke, clear_awaiting_remote_revoke, ChannelReady);
}
pub const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;
pub accepted_htlcs: Vec<(PendingHTLCInfo, u64)>,
pub failed_htlcs: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
pub finalized_claimed_htlcs: Vec<HTLCSource>,
+ pub pending_update_adds: Vec<msgs::UpdateAddHTLC>,
pub funding_broadcastable: Option<Transaction>,
pub channel_ready: Option<msgs::ChannelReady>,
pub announcement_sigs: Option<msgs::AnnouncementSignatures>,
pub(crate) struct ShutdownResult {
pub(crate) closure_reason: ClosureReason,
/// A channel monitor update to apply.
- pub(crate) monitor_update: Option<(PublicKey, OutPoint, ChannelMonitorUpdate)>,
+ pub(crate) monitor_update: Option<(PublicKey, OutPoint, ChannelId, ChannelMonitorUpdate)>,
/// A list of dropped outbound HTLCs that can safely be failed backwards immediately.
pub(crate) dropped_outbound_htlcs: Vec<(HTLCSource, PaymentHash, PublicKey, ChannelId)>,
/// An unbroadcasted batch funding transaction id. The closure of this channel should be
pub(super) enum ChannelPhase<SP: Deref> where SP::Target: SignerProvider {
UnfundedOutboundV1(OutboundV1Channel<SP>),
UnfundedInboundV1(InboundV1Channel<SP>),
+ #[cfg(any(dual_funding, splicing))]
+ UnfundedOutboundV2(OutboundV2Channel<SP>),
+ #[cfg(any(dual_funding, splicing))]
+ UnfundedInboundV2(InboundV2Channel<SP>),
Funded(Channel<SP>),
}
ChannelPhase::Funded(chan) => &chan.context,
ChannelPhase::UnfundedOutboundV1(chan) => &chan.context,
ChannelPhase::UnfundedInboundV1(chan) => &chan.context,
+ #[cfg(any(dual_funding, splicing))]
+ ChannelPhase::UnfundedOutboundV2(chan) => &chan.context,
+ #[cfg(any(dual_funding, splicing))]
+ ChannelPhase::UnfundedInboundV2(chan) => &chan.context,
}
}
ChannelPhase::Funded(ref mut chan) => &mut chan.context,
ChannelPhase::UnfundedOutboundV1(ref mut chan) => &mut chan.context,
ChannelPhase::UnfundedInboundV1(ref mut chan) => &mut chan.context,
+ #[cfg(any(dual_funding, splicing))]
+ ChannelPhase::UnfundedOutboundV2(ref mut chan) => &mut chan.context,
+ #[cfg(any(dual_funding, splicing))]
+ ChannelPhase::UnfundedInboundV2(ref mut chan) => &mut chan.context,
}
}
}
monitor_pending_forwards: Vec<(PendingHTLCInfo, u64)>,
monitor_pending_failures: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
monitor_pending_finalized_fulfills: Vec<HTLCSource>,
+ monitor_pending_update_adds: Vec<msgs::UpdateAddHTLC>,
/// If we went to send a commitment update (ie some messages then [`msgs::CommitmentSigned`])
/// but our signer (initially) refused to give us a signature, we should retry at some point in
// We track whether we already emitted a `ChannelReady` event.
channel_ready_event_emitted: bool,
+ /// Set to `Some(())` if we were the ones who initiated shutting down the channel.
+ local_initiated_shutdown: Option<()>,
+
/// The unique identifier used to re-derive the private key material for the channel through
/// [`SignerProvider::derive_channel_signer`].
+ #[cfg(not(test))]
channel_keys_id: [u8; 32],
+ #[cfg(test)]
+ pub channel_keys_id: [u8; 32],
/// If we can't release a [`ChannelMonitorUpdate`] until some external action completes, we
/// store it here and only release it to the `ChannelManager` once it asks for it.
}
impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider {
- /// Allowed in any state (including after shutdown)
- pub fn get_update_time_counter(&self) -> u32 {
- self.update_time_counter
- }
+ fn new_for_inbound_channel<'a, ES: Deref, F: Deref, L: Deref>(
+ fee_estimator: &'a LowerBoundedFeeEstimator<F>,
+ entropy_source: &'a ES,
+ signer_provider: &'a SP,
+ counterparty_node_id: PublicKey,
+ their_features: &'a InitFeatures,
+ user_id: u128,
+ config: &'a UserConfig,
+ current_chain_height: u32,
+ logger: &'a L,
+ is_0conf: bool,
+ our_funding_satoshis: u64,
+ counterparty_pubkeys: ChannelPublicKeys,
+ channel_type: ChannelTypeFeatures,
+ holder_selected_channel_reserve_satoshis: u64,
+ msg_channel_reserve_satoshis: u64,
+ msg_push_msat: u64,
+ open_channel_fields: msgs::CommonOpenChannelFields,
+ ) -> Result<ChannelContext<SP>, ChannelError>
+ where
+ ES::Target: EntropySource,
+ F::Target: FeeEstimator,
+ L::Target: Logger,
+ SP::Target: SignerProvider,
+ {
+ let logger = WithContext::from(logger, Some(counterparty_node_id), Some(open_channel_fields.temporary_channel_id));
+ let announced_channel = if (open_channel_fields.channel_flags & 1) == 1 { true } else { false };
- pub fn get_latest_monitor_update_id(&self) -> u64 {
- self.latest_monitor_update_id
- }
+ let channel_value_satoshis = our_funding_satoshis.saturating_add(open_channel_fields.funding_satoshis);
- pub fn should_announce(&self) -> bool {
- self.config.announced_channel
- }
+ let channel_keys_id = signer_provider.generate_channel_keys_id(true, channel_value_satoshis, user_id);
+ let holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
+ let pubkeys = holder_signer.pubkeys().clone();
- pub fn is_outbound(&self) -> bool {
- self.channel_transaction_parameters.is_outbound_from_holder
- }
+ if config.channel_handshake_config.our_to_self_delay < BREAKDOWN_TIMEOUT {
+ return Err(ChannelError::Close(format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risks. It must be greater than {}", config.channel_handshake_config.our_to_self_delay, BREAKDOWN_TIMEOUT)));
+ }
- /// Gets the fee we'd want to charge for adding an HTLC output to this Channel
- /// Allowed in any state (including after shutdown)
- pub fn get_outbound_forwarding_fee_base_msat(&self) -> u32 {
- self.config.options.forwarding_fee_base_msat
- }
+ // Check sanity of message fields:
+ if channel_value_satoshis > config.channel_handshake_limits.max_funding_satoshis {
+ return Err(ChannelError::Close(format!(
+ "Per our config, funding must be at most {}. It was {}. Peer contribution: {}. Our contribution: {}",
+ config.channel_handshake_limits.max_funding_satoshis, channel_value_satoshis,
+ open_channel_fields.funding_satoshis, our_funding_satoshis)));
+ }
+ if channel_value_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
+ return Err(ChannelError::Close(format!("Funding must be smaller than the total bitcoin supply. It was {}", channel_value_satoshis)));
+ }
+ if msg_channel_reserve_satoshis > channel_value_satoshis {
+ return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must be no greater than channel_value_satoshis: {}", msg_channel_reserve_satoshis, channel_value_satoshis)));
+ }
+ let full_channel_value_msat = (channel_value_satoshis - msg_channel_reserve_satoshis) * 1000;
+ if msg_push_msat > full_channel_value_msat {
+ return Err(ChannelError::Close(format!("push_msat {} was larger than channel amount minus reserve ({})", msg_push_msat, full_channel_value_msat)));
+ }
+ if open_channel_fields.dust_limit_satoshis > channel_value_satoshis {
+ return Err(ChannelError::Close(format!("dust_limit_satoshis {} was larger than channel_value_satoshis {}. Peer never wants payout outputs?", open_channel_fields.dust_limit_satoshis, channel_value_satoshis)));
+ }
+ if open_channel_fields.htlc_minimum_msat >= full_channel_value_msat {
+ return Err(ChannelError::Close(format!("Minimum htlc value ({}) was larger than full channel value ({})", open_channel_fields.htlc_minimum_msat, full_channel_value_msat)));
+ }
+ Channel::<SP>::check_remote_fee(&channel_type, fee_estimator, open_channel_fields.commitment_feerate_sat_per_1000_weight, None, &&logger)?;
- /// Returns true if we've ever received a message from the remote end for this Channel
- pub fn have_received_message(&self) -> bool {
- self.channel_state > ChannelState::NegotiatingFunding(NegotiatingFundingFlags::OUR_INIT_SENT)
- }
+ let max_counterparty_selected_contest_delay = u16::min(config.channel_handshake_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
+ if open_channel_fields.to_self_delay > max_counterparty_selected_contest_delay {
+ return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_counterparty_selected_contest_delay, open_channel_fields.to_self_delay)));
+ }
+ if open_channel_fields.max_accepted_htlcs < 1 {
+ return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
+ }
+ if open_channel_fields.max_accepted_htlcs > MAX_HTLCS {
+ return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", open_channel_fields.max_accepted_htlcs, MAX_HTLCS)));
+ }
- /// Returns true if this channel is fully established and not known to be closing.
- /// Allowed in any state (including after shutdown)
- pub fn is_usable(&self) -> bool {
- matches!(self.channel_state, ChannelState::ChannelReady(_)) &&
- !self.channel_state.is_local_shutdown_sent() &&
- !self.channel_state.is_remote_shutdown_sent() &&
- !self.monitor_pending_channel_ready
- }
+ // Now check against optional parameters as set by config...
+ if channel_value_satoshis < config.channel_handshake_limits.min_funding_satoshis {
+ return Err(ChannelError::Close(format!("Funding satoshis ({}) is less than the user specified limit ({})", channel_value_satoshis, config.channel_handshake_limits.min_funding_satoshis)));
+ }
+ if open_channel_fields.htlc_minimum_msat > config.channel_handshake_limits.max_htlc_minimum_msat {
+ return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", open_channel_fields.htlc_minimum_msat, config.channel_handshake_limits.max_htlc_minimum_msat)));
+ }
+ if open_channel_fields.max_htlc_value_in_flight_msat < config.channel_handshake_limits.min_max_htlc_value_in_flight_msat {
+ return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", open_channel_fields.max_htlc_value_in_flight_msat, config.channel_handshake_limits.min_max_htlc_value_in_flight_msat)));
+ }
+ if msg_channel_reserve_satoshis > config.channel_handshake_limits.max_channel_reserve_satoshis {
+ return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg_channel_reserve_satoshis, config.channel_handshake_limits.max_channel_reserve_satoshis)));
+ }
+ if open_channel_fields.max_accepted_htlcs < config.channel_handshake_limits.min_max_accepted_htlcs {
+ return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", open_channel_fields.max_accepted_htlcs, config.channel_handshake_limits.min_max_accepted_htlcs)));
+ }
+ if open_channel_fields.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
+ return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", open_channel_fields.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
+ }
+ if open_channel_fields.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
+ return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", open_channel_fields.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
+ }
- /// shutdown state returns the state of the channel in its various stages of shutdown
- pub fn shutdown_state(&self) -> ChannelShutdownState {
- match self.channel_state {
- ChannelState::AwaitingChannelReady(_)|ChannelState::ChannelReady(_) =>
- if self.channel_state.is_local_shutdown_sent() && !self.channel_state.is_remote_shutdown_sent() {
- ChannelShutdownState::ShutdownInitiated
- } else if (self.channel_state.is_local_shutdown_sent() || self.channel_state.is_remote_shutdown_sent()) && !self.closing_negotiation_ready() {
- ChannelShutdownState::ResolvingHTLCs
- } else if (self.channel_state.is_local_shutdown_sent() || self.channel_state.is_remote_shutdown_sent()) && self.closing_negotiation_ready() {
- ChannelShutdownState::NegotiatingClosingFee
- } else {
- ChannelShutdownState::NotShuttingDown
- },
- ChannelState::ShutdownComplete => ChannelShutdownState::ShutdownComplete,
- _ => ChannelShutdownState::NotShuttingDown,
+ // Convert things into internal flags and prep our state:
+
+ if config.channel_handshake_limits.force_announced_channel_preference {
+ if config.channel_handshake_config.announced_channel != announced_channel {
+ return Err(ChannelError::Close("Peer tried to open channel but their announcement preference is different from ours".to_owned()));
+ }
}
- }
- fn closing_negotiation_ready(&self) -> bool {
- let is_ready_to_close = match self.channel_state {
- ChannelState::AwaitingChannelReady(flags) =>
- flags & FundedStateFlags::ALL == FundedStateFlags::LOCAL_SHUTDOWN_SENT | FundedStateFlags::REMOTE_SHUTDOWN_SENT,
- ChannelState::ChannelReady(flags) =>
- flags == FundedStateFlags::LOCAL_SHUTDOWN_SENT | FundedStateFlags::REMOTE_SHUTDOWN_SENT,
- _ => false,
- };
- self.pending_inbound_htlcs.is_empty() &&
- self.pending_outbound_htlcs.is_empty() &&
- self.pending_update_fee.is_none() &&
- is_ready_to_close
- }
+ if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
+ // Protocol-level safety check; this should never happen in practice because
+ // `MIN_THEIR_CHAN_RESERVE_SATOSHIS` already enforces a higher minimum.
+ return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({}). dust_limit_satoshis is ({}).", holder_selected_channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
+ }
+ if holder_selected_channel_reserve_satoshis * 1000 >= full_channel_value_msat {
+ return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({})msats. Channel value is ({} - {})msats.", holder_selected_channel_reserve_satoshis * 1000, full_channel_value_msat, msg_push_msat)));
+ }
+ if msg_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
+ log_debug!(logger, "channel_reserve_satoshis ({}) is smaller than our dust limit ({}). We can broadcast stale states without any risk, implying this channel is very insecure for our counterparty.",
+ msg_channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS);
+ }
+ if holder_selected_channel_reserve_satoshis < open_channel_fields.dust_limit_satoshis {
+ return Err(ChannelError::Close(format!("Dust limit ({}) too high for the channel reserve we require the remote to keep ({})", open_channel_fields.dust_limit_satoshis, holder_selected_channel_reserve_satoshis)));
+ }
- /// Returns true if this channel is currently available for use. This is a superset of
- /// is_usable() and considers things like the channel being temporarily disabled.
- /// Allowed in any state (including after shutdown)
- pub fn is_live(&self) -> bool {
- self.is_usable() && !self.channel_state.is_peer_disconnected()
- }
+ // check if the funder's amount for the initial commitment tx is sufficient
+ // for full fee payment plus a few HTLCs to ensure the channel will be useful.
+ let anchor_outputs_value = if channel_type.supports_anchors_zero_fee_htlc_tx() {
+ ANCHOR_OUTPUT_VALUE_SATOSHI * 2
+ } else {
+ 0
+ };
+ let funders_amount_msat = open_channel_fields.funding_satoshis * 1000 - msg_push_msat;
+ let commitment_tx_fee = commit_tx_fee_msat(open_channel_fields.commitment_feerate_sat_per_1000_weight, MIN_AFFORDABLE_HTLC_COUNT, &channel_type) / 1000;
+ if (funders_amount_msat / 1000).saturating_sub(anchor_outputs_value) < commitment_tx_fee {
+ return Err(ChannelError::Close(format!("Funding amount ({} sats) can't even pay fee for initial commitment transaction fee of {} sats.", (funders_amount_msat / 1000).saturating_sub(anchor_outputs_value), commitment_tx_fee)));
+ }
- // Public utilities:
+ let to_remote_satoshis = funders_amount_msat / 1000 - commitment_tx_fee - anchor_outputs_value;
+ // While it's reasonable for us to not meet the channel reserve initially (if they don't
+ // want to push much to us), our counterparty should always have more than our reserve.
+ if to_remote_satoshis < holder_selected_channel_reserve_satoshis {
+ return Err(ChannelError::Close("Insufficient funding amount for initial reserve".to_owned()));
+ }
- pub fn channel_id(&self) -> ChannelId {
- self.channel_id
- }
+ let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
+ match &open_channel_fields.shutdown_scriptpubkey {
+ &Some(ref script) => {
+ // Peer is signaling upfront_shutdown and has opted out with a 0-length script. We don't enforce anything
+ if script.len() == 0 {
+ None
+ } else {
+ if !script::is_bolt2_compliant(&script, their_features) {
+ return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)))
+ }
+ Some(script.clone())
+ }
+ },
+ // Peer is signaling upfront shutdown but didn't opt out via the correct mechanism (i.e., a 0-length script). Peer looks buggy; we fail the channel
+ &None => {
+ return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out".to_owned()));
+ }
+ }
+ } else { None };
- // Return the `temporary_channel_id` used during channel establishment.
- //
- // Will return `None` for channels created prior to LDK version 0.0.115.
- pub fn temporary_channel_id(&self) -> Option<ChannelId> {
- self.temporary_channel_id
- }
+ let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
+ match signer_provider.get_shutdown_scriptpubkey() {
+ Ok(scriptpubkey) => Some(scriptpubkey),
+ Err(_) => return Err(ChannelError::Close("Failed to get upfront shutdown scriptpubkey".to_owned())),
+ }
+ } else { None };
- pub fn minimum_depth(&self) -> Option<u32> {
- self.minimum_depth
- }
+ if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
+ if !shutdown_scriptpubkey.is_compatible(&their_features) {
+ return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
+ }
+ }
- /// Gets the "user_id" value passed into the construction of this channel. It has no special
- /// meaning and exists only to allow users to have a persistent identifier of a channel.
- pub fn get_user_id(&self) -> u128 {
- self.user_id
- }
+ let destination_script = match signer_provider.get_destination_script(channel_keys_id) {
+ Ok(script) => script,
+ Err(_) => return Err(ChannelError::Close("Failed to get destination script".to_owned())),
+ };
- /// Gets the channel's type
- pub fn get_channel_type(&self) -> &ChannelTypeFeatures {
- &self.channel_type
- }
+ let mut secp_ctx = Secp256k1::new();
+ secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
+
+ let minimum_depth = if is_0conf {
+ Some(0)
+ } else {
+ Some(cmp::max(config.channel_handshake_config.minimum_depth, 1))
+ };
+
+ let value_to_self_msat = our_funding_satoshis * 1000 + msg_push_msat;
+
+ // TODO(dual_funding): Checks for `funding_feerate_sat_per_1000_weight`?
+
+ let channel_context = ChannelContext {
+ user_id,
+
+ config: LegacyChannelConfig {
+ options: config.channel_config.clone(),
+ announced_channel,
+ commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey,
+ },
+
+ prev_config: None,
+
+ inbound_handshake_limits_override: None,
+
+ temporary_channel_id: Some(open_channel_fields.temporary_channel_id),
+ channel_id: open_channel_fields.temporary_channel_id,
+ channel_state: ChannelState::NegotiatingFunding(
+ NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT
+ ),
+ announcement_sigs_state: AnnouncementSigsState::NotSent,
+ secp_ctx,
+
+ latest_monitor_update_id: 0,
+
+ holder_signer: ChannelSignerType::Ecdsa(holder_signer),
+ shutdown_scriptpubkey,
+ destination_script,
+
+ cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
+ cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
+ value_to_self_msat,
+
+ pending_inbound_htlcs: Vec::new(),
+ pending_outbound_htlcs: Vec::new(),
+ holding_cell_htlc_updates: Vec::new(),
+ pending_update_fee: None,
+ holding_cell_update_fee: None,
+ next_holder_htlc_id: 0,
+ next_counterparty_htlc_id: 0,
+ update_time_counter: 1,
+
+ resend_order: RAACommitmentOrder::CommitmentFirst,
+
+ monitor_pending_channel_ready: false,
+ monitor_pending_revoke_and_ack: false,
+ monitor_pending_commitment_signed: false,
+ monitor_pending_forwards: Vec::new(),
+ monitor_pending_failures: Vec::new(),
+ monitor_pending_finalized_fulfills: Vec::new(),
+ monitor_pending_update_adds: Vec::new(),
+
+ signer_pending_commitment_update: false,
+ signer_pending_funding: false,
+
+
+ #[cfg(debug_assertions)]
+ holder_max_commitment_tx_output: Mutex::new((value_to_self_msat, (channel_value_satoshis * 1000 - msg_push_msat).saturating_sub(value_to_self_msat))),
+ #[cfg(debug_assertions)]
+ counterparty_max_commitment_tx_output: Mutex::new((value_to_self_msat, (channel_value_satoshis * 1000 - msg_push_msat).saturating_sub(value_to_self_msat))),
+
+ last_sent_closing_fee: None,
+ pending_counterparty_closing_signed: None,
+ expecting_peer_commitment_signed: false,
+ closing_fee_limits: None,
+ target_closing_feerate_sats_per_kw: None,
+
+ funding_tx_confirmed_in: None,
+ funding_tx_confirmation_height: 0,
+ short_channel_id: None,
+ channel_creation_height: current_chain_height,
+
+ feerate_per_kw: open_channel_fields.commitment_feerate_sat_per_1000_weight,
+ channel_value_satoshis,
+ counterparty_dust_limit_satoshis: open_channel_fields.dust_limit_satoshis,
+ holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
+ counterparty_max_htlc_value_in_flight_msat: cmp::min(open_channel_fields.max_htlc_value_in_flight_msat, channel_value_satoshis * 1000),
+ holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &config.channel_handshake_config),
+ counterparty_selected_channel_reserve_satoshis: Some(msg_channel_reserve_satoshis),
+ holder_selected_channel_reserve_satoshis,
+ counterparty_htlc_minimum_msat: open_channel_fields.htlc_minimum_msat,
+ holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat },
+ counterparty_max_accepted_htlcs: open_channel_fields.max_accepted_htlcs,
+ holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS),
+ minimum_depth,
+
+ counterparty_forwarding_info: None,
+
+ channel_transaction_parameters: ChannelTransactionParameters {
+ holder_pubkeys: pubkeys,
+ holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay,
+ is_outbound_from_holder: false,
+ counterparty_parameters: Some(CounterpartyChannelTransactionParameters {
+ selected_contest_delay: open_channel_fields.to_self_delay,
+ pubkeys: counterparty_pubkeys,
+ }),
+ funding_outpoint: None,
+ channel_type_features: channel_type.clone()
+ },
+ funding_transaction: None,
+ is_batch_funding: None,
+
+ counterparty_cur_commitment_point: Some(open_channel_fields.first_per_commitment_point),
+ counterparty_prev_commitment_point: None,
+ counterparty_node_id,
+
+ counterparty_shutdown_scriptpubkey,
+
+ commitment_secrets: CounterpartyCommitmentSecrets::new(),
+
+ channel_update_status: ChannelUpdateStatus::Enabled,
+ closing_signed_in_flight: false,
+
+ announcement_sigs: None,
+
+ #[cfg(any(test, fuzzing))]
+ next_local_commitment_tx_fee_info_cached: Mutex::new(None),
+ #[cfg(any(test, fuzzing))]
+ next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
+
+ workaround_lnd_bug_4006: None,
+ sent_message_awaiting_response: None,
+
+ latest_inbound_scid_alias: None,
+ outbound_scid_alias: 0,
+
+ channel_pending_event_emitted: false,
+ channel_ready_event_emitted: false,
+
+ #[cfg(any(test, fuzzing))]
+ historical_inbound_htlc_fulfills: new_hash_set(),
+
+ channel_type,
+ channel_keys_id,
+
+ local_initiated_shutdown: None,
+
+ blocked_monitor_updates: Vec::new(),
+ };
+
+ Ok(channel_context)
+ }
+
+ fn new_for_outbound_channel<'a, ES: Deref, F: Deref>(
+ fee_estimator: &'a LowerBoundedFeeEstimator<F>,
+ entropy_source: &'a ES,
+ signer_provider: &'a SP,
+ counterparty_node_id: PublicKey,
+ their_features: &'a InitFeatures,
+ funding_satoshis: u64,
+ push_msat: u64,
+ user_id: u128,
+ config: &'a UserConfig,
+ current_chain_height: u32,
+ outbound_scid_alias: u64,
+ temporary_channel_id: Option<ChannelId>,
+ holder_selected_channel_reserve_satoshis: u64,
+ channel_keys_id: [u8; 32],
+ holder_signer: <SP::Target as SignerProvider>::EcdsaSigner,
+ pubkeys: ChannelPublicKeys,
+ ) -> Result<ChannelContext<SP>, APIError>
+ where
+ ES::Target: EntropySource,
+ F::Target: FeeEstimator,
+ SP::Target: SignerProvider,
+ {
+ // This will be updated with the counterparty contribution if this is a dual-funded channel
+ let channel_value_satoshis = funding_satoshis;
+
+ let holder_selected_contest_delay = config.channel_handshake_config.our_to_self_delay;
+
+ if !their_features.supports_wumbo() && channel_value_satoshis > MAX_FUNDING_SATOSHIS_NO_WUMBO {
+ return Err(APIError::APIMisuseError{err: format!("funding_value must not exceed {}, it was {}", MAX_FUNDING_SATOSHIS_NO_WUMBO, channel_value_satoshis)});
+ }
+ if channel_value_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
+ return Err(APIError::APIMisuseError{err: format!("funding_value must be smaller than the total bitcoin supply, it was {}", channel_value_satoshis)});
+ }
+ let channel_value_msat = channel_value_satoshis * 1000;
+ if push_msat > channel_value_msat {
+ return Err(APIError::APIMisuseError { err: format!("Push value ({}) was larger than channel_value ({})", push_msat, channel_value_msat) });
+ }
+ if holder_selected_contest_delay < BREAKDOWN_TIMEOUT {
+ return Err(APIError::APIMisuseError {err: format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risks", holder_selected_contest_delay)});
+ }
+
+ let channel_type = get_initial_channel_type(&config, their_features);
+ debug_assert!(channel_type.is_subset(&channelmanager::provided_channel_type_features(&config)));
+
+ let (commitment_conf_target, anchor_outputs_value_msat) = if channel_type.supports_anchors_zero_fee_htlc_tx() {
+ (ConfirmationTarget::AnchorChannelFee, ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000)
+ } else {
+ (ConfirmationTarget::NonAnchorChannelFee, 0)
+ };
+ let commitment_feerate = fee_estimator.bounded_sat_per_1000_weight(commitment_conf_target);
+
+ let value_to_self_msat = channel_value_satoshis * 1000 - push_msat;
+ let commitment_tx_fee = commit_tx_fee_msat(commitment_feerate, MIN_AFFORDABLE_HTLC_COUNT, &channel_type);
+ if value_to_self_msat.saturating_sub(anchor_outputs_value_msat) < commitment_tx_fee {
+ return Err(APIError::APIMisuseError{ err: format!("Funding amount ({}) can't even pay fee for initial commitment transaction fee of {}.", value_to_self_msat / 1000, commitment_tx_fee / 1000) });
+ }
+
+ let mut secp_ctx = Secp256k1::new();
+ secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
+
+ let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
+ match signer_provider.get_shutdown_scriptpubkey() {
+ Ok(scriptpubkey) => Some(scriptpubkey),
+ Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get shutdown scriptpubkey".to_owned()}),
+ }
+ } else { None };
+
+ if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
+ if !shutdown_scriptpubkey.is_compatible(&their_features) {
+ return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() });
+ }
+ }
+
+ let destination_script = match signer_provider.get_destination_script(channel_keys_id) {
+ Ok(script) => script,
+ Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get destination script".to_owned()}),
+ };
+
+ let temporary_channel_id = temporary_channel_id.unwrap_or_else(|| ChannelId::temporary_from_entropy_source(entropy_source));
+
+ Ok(Self {
+ user_id,
+
+ config: LegacyChannelConfig {
+ options: config.channel_config.clone(),
+ announced_channel: config.channel_handshake_config.announced_channel,
+ commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey,
+ },
+
+ prev_config: None,
+
+ inbound_handshake_limits_override: Some(config.channel_handshake_limits.clone()),
+
+ channel_id: temporary_channel_id,
+ temporary_channel_id: Some(temporary_channel_id),
+ channel_state: ChannelState::NegotiatingFunding(NegotiatingFundingFlags::OUR_INIT_SENT),
+ announcement_sigs_state: AnnouncementSigsState::NotSent,
+ secp_ctx,
+ // We'll add our counterparty's `funding_satoshis` when we receive `accept_channel2`.
+ channel_value_satoshis,
+
+ latest_monitor_update_id: 0,
+
+ holder_signer: ChannelSignerType::Ecdsa(holder_signer),
+ shutdown_scriptpubkey,
+ destination_script,
+
+ cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
+ cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
+ value_to_self_msat,
+
+ pending_inbound_htlcs: Vec::new(),
+ pending_outbound_htlcs: Vec::new(),
+ holding_cell_htlc_updates: Vec::new(),
+ pending_update_fee: None,
+ holding_cell_update_fee: None,
+ next_holder_htlc_id: 0,
+ next_counterparty_htlc_id: 0,
+ update_time_counter: 1,
+
+ resend_order: RAACommitmentOrder::CommitmentFirst,
+
+ monitor_pending_channel_ready: false,
+ monitor_pending_revoke_and_ack: false,
+ monitor_pending_commitment_signed: false,
+ monitor_pending_forwards: Vec::new(),
+ monitor_pending_failures: Vec::new(),
+ monitor_pending_finalized_fulfills: Vec::new(),
+ monitor_pending_update_adds: Vec::new(),
+
+ signer_pending_commitment_update: false,
+ signer_pending_funding: false,
+
+ // We'll add our counterparty's `funding_satoshis` to these max commitment output assertions
+ // when we receive `accept_channel2`.
+ #[cfg(debug_assertions)]
+ holder_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
+ #[cfg(debug_assertions)]
+ counterparty_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
+
+ last_sent_closing_fee: None,
+ pending_counterparty_closing_signed: None,
+ expecting_peer_commitment_signed: false,
+ closing_fee_limits: None,
+ target_closing_feerate_sats_per_kw: None,
+
+ funding_tx_confirmed_in: None,
+ funding_tx_confirmation_height: 0,
+ short_channel_id: None,
+ channel_creation_height: current_chain_height,
+
+ feerate_per_kw: commitment_feerate,
+ counterparty_dust_limit_satoshis: 0,
+ holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
+ counterparty_max_htlc_value_in_flight_msat: 0,
+ // We'll adjust this to include our counterparty's `funding_satoshis` when we
+ // receive `accept_channel2`.
+ holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &config.channel_handshake_config),
+ counterparty_selected_channel_reserve_satoshis: None, // Filled in in accept_channel
+ holder_selected_channel_reserve_satoshis,
+ counterparty_htlc_minimum_msat: 0,
+ holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat },
+ counterparty_max_accepted_htlcs: 0,
+ holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS),
+ minimum_depth: None, // Filled in in accept_channel
+
+ counterparty_forwarding_info: None,
+
+ channel_transaction_parameters: ChannelTransactionParameters {
+ holder_pubkeys: pubkeys,
+ holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay,
+ is_outbound_from_holder: true,
+ counterparty_parameters: None,
+ funding_outpoint: None,
+ channel_type_features: channel_type.clone()
+ },
+ funding_transaction: None,
+ is_batch_funding: None,
+
+ counterparty_cur_commitment_point: None,
+ counterparty_prev_commitment_point: None,
+ counterparty_node_id,
+
+ counterparty_shutdown_scriptpubkey: None,
+
+ commitment_secrets: CounterpartyCommitmentSecrets::new(),
+
+ channel_update_status: ChannelUpdateStatus::Enabled,
+ closing_signed_in_flight: false,
+
+ announcement_sigs: None,
+
+ #[cfg(any(test, fuzzing))]
+ next_local_commitment_tx_fee_info_cached: Mutex::new(None),
+ #[cfg(any(test, fuzzing))]
+ next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
+
+ workaround_lnd_bug_4006: None,
+ sent_message_awaiting_response: None,
+
+ latest_inbound_scid_alias: None,
+ outbound_scid_alias,
+
+ channel_pending_event_emitted: false,
+ channel_ready_event_emitted: false,
+
+ #[cfg(any(test, fuzzing))]
+ historical_inbound_htlc_fulfills: new_hash_set(),
+
+ channel_type,
+ channel_keys_id,
+
+ blocked_monitor_updates: Vec::new(),
+ local_initiated_shutdown: None,
+ })
+ }
+
+	/// Returns the channel's current `update_time_counter`.
+	///
+	/// Allowed in any state (including after shutdown)
+	pub fn get_update_time_counter(&self) -> u32 {
+		self.update_time_counter
+	}
+
+	/// Returns the `update_id` of the latest `ChannelMonitorUpdate` generated for this channel.
+	pub fn get_latest_monitor_update_id(&self) -> u64 {
+		self.latest_monitor_update_id
+	}
+
+	/// Returns true if this channel was configured to be publicly announced
+	/// (the `announced_channel` handshake-config setting).
+	pub fn should_announce(&self) -> bool {
+		self.config.announced_channel
+	}
+
+	/// Returns true if this channel is outbound from the holder, i.e. we initiated it.
+	pub fn is_outbound(&self) -> bool {
+		self.channel_transaction_parameters.is_outbound_from_holder
+	}
+
+	/// Gets the fee we'd want to charge for adding an HTLC output to this Channel
+	/// (the configured base forwarding fee, in millisatoshis).
+	///
+	/// Allowed in any state (including after shutdown)
+	pub fn get_outbound_forwarding_fee_base_msat(&self) -> u32 {
+		self.config.options.forwarding_fee_base_msat
+	}
+
+	/// Returns true if we've ever received a message from the remote end for this Channel
+	pub fn have_received_message(&self) -> bool {
+		// Any state past `NegotiatingFunding(OUR_INIT_SENT)` implies the peer has sent us at
+		// least one message.
+		self.channel_state > ChannelState::NegotiatingFunding(NegotiatingFundingFlags::OUR_INIT_SENT)
+	}
+
+	/// Returns true if this channel is fully established and not known to be closing.
+	/// Allowed in any state (including after shutdown)
+	pub fn is_usable(&self) -> bool {
+		matches!(self.channel_state, ChannelState::ChannelReady(_)) &&
+			// Neither side may have sent `shutdown`...
+			!self.channel_state.is_local_shutdown_sent() &&
+			!self.channel_state.is_remote_shutdown_sent() &&
+			// ...and we must not still have a `channel_ready` pending (on a monitor update).
+			!self.monitor_pending_channel_ready
+	}
+
+	/// shutdown state returns the state of the channel in its various stages of shutdown
+	pub fn shutdown_state(&self) -> ChannelShutdownState {
+		match self.channel_state {
+			ChannelState::AwaitingChannelReady(_)|ChannelState::ChannelReady(_) =>
+				if self.channel_state.is_local_shutdown_sent() && !self.channel_state.is_remote_shutdown_sent() {
+					// We sent `shutdown` but the counterparty has not (yet) responded in kind.
+					ChannelShutdownState::ShutdownInitiated
+				} else if (self.channel_state.is_local_shutdown_sent() || self.channel_state.is_remote_shutdown_sent()) && !self.closing_negotiation_ready() {
+					// `shutdown` has been seen in at least one direction, but pending HTLCs or a
+					// pending fee update still block closing negotiation
+					// (see `closing_negotiation_ready`).
+					ChannelShutdownState::ResolvingHTLCs
+				} else if (self.channel_state.is_local_shutdown_sent() || self.channel_state.is_remote_shutdown_sent()) && self.closing_negotiation_ready() {
+					ChannelShutdownState::NegotiatingClosingFee
+				} else {
+					ChannelShutdownState::NotShuttingDown
+				},
+			ChannelState::ShutdownComplete => ChannelShutdownState::ShutdownComplete,
+			_ => ChannelShutdownState::NotShuttingDown,
+		}
+	}
+
+	/// Returns true once both sides have sent `shutdown` and nothing else blocks the start of
+	/// closing-fee negotiation.
+	fn closing_negotiation_ready(&self) -> bool {
+		let is_ready_to_close = match self.channel_state {
+			ChannelState::AwaitingChannelReady(flags) =>
+				// Mask down to the funded-state flags and require exactly both shutdown bits.
+				flags & FundedStateFlags::ALL == FundedStateFlags::LOCAL_SHUTDOWN_SENT | FundedStateFlags::REMOTE_SHUTDOWN_SENT,
+			ChannelState::ChannelReady(flags) =>
+				flags == FundedStateFlags::LOCAL_SHUTDOWN_SENT | FundedStateFlags::REMOTE_SHUTDOWN_SENT,
+			_ => false,
+		};
+		// All pending HTLCs must be resolved and no fee update may be in flight before we can
+		// negotiate a closing fee.
+		self.pending_inbound_htlcs.is_empty() &&
+			self.pending_outbound_htlcs.is_empty() &&
+			self.pending_update_fee.is_none() &&
+			is_ready_to_close
+	}
+
+	/// Returns true if this channel is currently available for use. This is a superset of
+	/// is_usable() and considers things like the channel being temporarily disabled.
+	/// Allowed in any state (including after shutdown)
+	pub fn is_live(&self) -> bool {
+		// A usable channel is only "live" while the peer is connected.
+		self.is_usable() && !self.channel_state.is_peer_disconnected()
+	}
+
+ // Public utilities:
+
+	/// Returns this channel's ID.
+	pub fn channel_id(&self) -> ChannelId {
+		self.channel_id
+	}
+
+	/// Returns the `temporary_channel_id` used during channel establishment.
+	///
+	/// Will return `None` for channels created prior to LDK version 0.0.115.
+	pub fn temporary_channel_id(&self) -> Option<ChannelId> {
+		self.temporary_channel_id
+	}
+
+	/// Returns the negotiated minimum funding confirmation depth, or `None` if it has not yet
+	/// been set (it is filled in from the counterparty's `accept_channel`).
+	pub fn minimum_depth(&self) -> Option<u32> {
+		self.minimum_depth
+	}
+
+	/// Gets the "user_id" value passed into the construction of this channel. It has no special
+	/// meaning and exists only to allow users to have a persistent identifier for the channel.
+	pub fn get_user_id(&self) -> u128 {
+		self.user_id
+	}
+
+	/// Gets the channel's [`ChannelTypeFeatures`].
+	pub fn get_channel_type(&self) -> &ChannelTypeFeatures {
+		&self.channel_type
+	}
/// Gets the channel's `short_channel_id`.
///
feerate_per_kw = cmp::max(feerate_per_kw, feerate);
}
let feerate_plus_quarter = feerate_per_kw.checked_mul(1250).map(|v| v / 1000);
- cmp::max(2530, feerate_plus_quarter.unwrap_or(u32::max_value()))
+ cmp::max(feerate_per_kw + 2530, feerate_plus_quarter.unwrap_or(u32::max_value()))
}
/// Get forwarding information for the counterparty.
stats
}
+	/// Returns information on all pending inbound HTLCs.
+	pub fn get_pending_inbound_htlc_details(&self) -> Vec<InboundHTLCDetails> {
+		// First collect, per HTLC ID, any resolution (fulfill/fail) sitting in the holding
+		// cell; below it supersedes the HTLC's own state when reporting details.
+		let mut holding_cell_states = new_hash_map();
+		for holding_cell_update in self.holding_cell_htlc_updates.iter() {
+			match holding_cell_update {
+				HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
+					holding_cell_states.insert(
+						htlc_id,
+						InboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFulfill,
+					);
+				},
+				HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } => {
+					holding_cell_states.insert(
+						htlc_id,
+						InboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFail,
+					);
+				},
+				HTLCUpdateAwaitingACK::FailMalformedHTLC { htlc_id, .. } => {
+					holding_cell_states.insert(
+						htlc_id,
+						InboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFail,
+					);
+				},
+				// Outbound HTLC.
+				HTLCUpdateAwaitingACK::AddHTLC { .. } => {},
+			}
+		}
+		let mut inbound_details = Vec::new();
+		// Dust threshold for inbound HTLCs on our commitment transaction: with anchors the
+		// HTLC transactions are zero-fee, otherwise add the HTLC-success tx fee at the dust
+		// buffer feerate.
+		let htlc_success_dust_limit = if self.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
+			0
+		} else {
+			let dust_buffer_feerate = self.get_dust_buffer_feerate(None) as u64;
+			dust_buffer_feerate * htlc_success_tx_weight(self.get_channel_type()) / 1000
+		};
+		let holder_dust_limit_success_sat = htlc_success_dust_limit + self.holder_dust_limit_satoshis;
+		for htlc in self.pending_inbound_htlcs.iter() {
+			// States which convert to `None` have no external representation and are skipped.
+			if let Some(state_details) = (&htlc.state).into() {
+				inbound_details.push(InboundHTLCDetails{
+					htlc_id: htlc.htlc_id,
+					amount_msat: htlc.amount_msat,
+					cltv_expiry: htlc.cltv_expiry,
+					payment_hash: htlc.payment_hash,
+					state: Some(holding_cell_states.remove(&htlc.htlc_id).unwrap_or(state_details)),
+					is_dust: htlc.amount_msat / 1000 < holder_dust_limit_success_sat,
+				});
+			}
+		}
+		inbound_details
+	}
+
+	/// Returns information on all pending outbound HTLCs.
+	pub fn get_pending_outbound_htlc_details(&self) -> Vec<OutboundHTLCDetails> {
+		let mut outbound_details = Vec::new();
+		// Dust threshold for outbound HTLCs on our commitment transaction. Outbound HTLCs are
+		// claimed by us via HTLC-*timeout* transactions, so the weight here must be the
+		// timeout tx weight (the success tx weight applies to inbound HTLCs only); with
+		// anchors the HTLC transactions are zero-fee.
+		let htlc_timeout_dust_limit = if self.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
+			0
+		} else {
+			let dust_buffer_feerate = self.get_dust_buffer_feerate(None) as u64;
+			dust_buffer_feerate * htlc_timeout_tx_weight(self.get_channel_type()) / 1000
+		};
+		let holder_dust_limit_timeout_sat = htlc_timeout_dust_limit + self.holder_dust_limit_satoshis;
+		for htlc in self.pending_outbound_htlcs.iter() {
+			outbound_details.push(OutboundHTLCDetails{
+				htlc_id: Some(htlc.htlc_id),
+				amount_msat: htlc.amount_msat,
+				cltv_expiry: htlc.cltv_expiry,
+				payment_hash: htlc.payment_hash,
+				skimmed_fee_msat: htlc.skimmed_fee_msat,
+				state: Some((&htlc.state).into()),
+				is_dust: htlc.amount_msat / 1000 < holder_dust_limit_timeout_sat,
+			});
+		}
+		// HTLCs still sitting in the holding cell have not been assigned an ID yet, hence
+		// `htlc_id: None`, and are always awaiting the remote's revoke-and-ack to be added.
+		for holding_cell_update in self.holding_cell_htlc_updates.iter() {
+			if let HTLCUpdateAwaitingACK::AddHTLC {
+				amount_msat,
+				cltv_expiry,
+				payment_hash,
+				skimmed_fee_msat,
+				..
+			} = *holding_cell_update {
+				outbound_details.push(OutboundHTLCDetails{
+					htlc_id: None,
+					amount_msat: amount_msat,
+					cltv_expiry: cltv_expiry,
+					payment_hash: payment_hash,
+					skimmed_fee_msat: skimmed_fee_msat,
+					state: Some(OutboundHTLCStateDetails::AwaitingRemoteRevokeToAdd),
+					is_dust: amount_msat / 1000 < holder_dust_limit_timeout_sat,
+				});
+			}
+		}
+		outbound_details
+	}
+
/// Get the available balances, see [`AvailableBalances`]'s fields for more info.
/// Doesn't bother handling the
/// if-we-removed-it-already-but-haven't-fully-resolved-they-can-still-send-an-inbound-HTLC
// funding transaction, don't return a funding txo (which prevents providing the
// monitor update to the user, even if we return one).
// See test_duplicate_chan_id and test_pre_lockin_no_chan_closed_update for more.
- let generate_monitor_update = match self.channel_state {
- ChannelState::AwaitingChannelReady(_)|ChannelState::ChannelReady(_)|ChannelState::ShutdownComplete => true,
- _ => false,
- };
- if generate_monitor_update {
+ if !self.channel_state.is_pre_funded_state() {
self.latest_monitor_update_id = CLOSED_CHANNEL_UPDATE_ID;
- Some((self.get_counterparty_node_id(), funding_txo, ChannelMonitorUpdate {
+ Some((self.get_counterparty_node_id(), funding_txo, self.channel_id(), ChannelMonitorUpdate {
update_id: self.latest_monitor_update_id,
counterparty_node_id: Some(self.counterparty_node_id),
updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast }],
+ channel_id: Some(self.channel_id()),
}))
} else { None }
} else { None };
_ => todo!()
}
}
-}
-
-// Internal utility functions for channels
-/// Returns the value to use for `holder_max_htlc_value_in_flight_msat` as a percentage of the
-/// `channel_value_satoshis` in msat, set through
-/// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`]
-///
+	/// If we receive an error message when attempting to open a channel, it may only be a rejection
+	/// of the channel type we tried, not of our ability to open any channel at all. We can see if a
+	/// downgrade of channel features would be possible so that we can still open the channel.
+	///
+	/// Returns `Err(())` when no (further) downgrade is possible; on `Ok(())` the channel type
+	/// (and, if anchors were dropped, the commitment feerate) has been updated for a retry.
+	pub(crate) fn maybe_downgrade_channel_features<F: Deref>(
+		&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>
+	) -> Result<(), ()>
+	where
+		F::Target: FeeEstimator
+	{
+		// Only outbound channels which are still in the initial negotiation phase (we've sent
+		// our `open_channel` and nothing further has happened) may be retried.
+		if !self.is_outbound() ||
+			!matches!(
+				self.channel_state, ChannelState::NegotiatingFunding(flags)
+				if flags == NegotiatingFundingFlags::OUR_INIT_SENT
+			)
+		{
+			return Err(());
+		}
+		if self.channel_type == ChannelTypeFeatures::only_static_remote_key() {
+			// We've exhausted our options
+			return Err(());
+		}
+		// We support opening a few different types of channels. Try removing our additional
+		// features one by one until we've either arrived at our default or the counterparty has
+		// accepted one.
+		//
+		// Due to the order below, we may not negotiate `option_anchors_zero_fee_htlc_tx` if the
+		// counterparty doesn't support `option_scid_privacy`. Since `get_initial_channel_type`
+		// checks whether the counterparty supports every feature, this would only happen if the
+		// counterparty is advertising the feature, but rejecting channels proposing the feature for
+		// whatever reason.
+		if self.channel_type.supports_anchors_zero_fee_htlc_tx() {
+			self.channel_type.clear_anchors_zero_fee_htlc_tx();
+			// Dropping anchors changes the fee regime: switch to the non-anchor feerate target.
+			self.feerate_per_kw = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::NonAnchorChannelFee);
+			assert!(!self.channel_transaction_parameters.channel_type_features.supports_anchors_nonzero_fee_htlc_tx());
+		} else if self.channel_type.supports_scid_privacy() {
+			self.channel_type.clear_scid_privacy();
+		} else {
+			self.channel_type = ChannelTypeFeatures::only_static_remote_key();
+		}
+		// Keep the transaction-parameters copy of the channel type in sync.
+		self.channel_transaction_parameters.channel_type_features = self.channel_type.clone();
+		Ok(())
+	}
+}
+
+// Internal utility functions for channels
+
+/// Returns the value to use for `holder_max_htlc_value_in_flight_msat` as a percentage of the
+/// `channel_value_satoshis` in msat, set through
+/// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`]
+///
/// The effective percentage is lower bounded by 1% and upper bounded by 100%.
///
/// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`]: crate::util::config::ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel
cmp::min(channel_value_satoshis, cmp::max(q, 1000))
}
+/// Returns a minimum channel reserve value each party needs to maintain, fixed in the spec to a
+/// default of 1% of the total channel value.
+///
+/// Guaranteed to return a value no larger than channel_value_satoshis
+///
+/// This is used both for outbound and inbound channels and has lower bound
+/// of `dust_limit_satoshis`.
+#[cfg(any(dual_funding, splicing))]
+fn get_v2_channel_reserve_satoshis(channel_value_satoshis: u64, dust_limit_satoshis: u64) -> u64 {
+	// Fixed at 1% of channel value by spec.
+	// Unsigned division cannot overflow, so the overflow flag is discarded; the result is
+	// clamped to the range [dust_limit_satoshis, channel_value_satoshis].
+	let (q, _) = channel_value_satoshis.overflowing_div(100);
+	cmp::min(channel_value_satoshis, cmp::max(q, dust_limit_satoshis))
+}
+
// Get the fee cost in SATS of a commitment tx with a given number of HTLC outputs.
// Note that num_htlcs should not include dust HTLCs.
#[inline]
(commitment_tx_base_weight(channel_type_features) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) * feerate_per_kw as u64 / 1000 * 1000
}
+/// Context for dual-funded channels.
+///
+/// Only compiled in when the `dual_funding` or `splicing` cfg flags are set.
+#[cfg(any(dual_funding, splicing))]
+pub(super) struct DualFundingChannelContext {
+	/// The amount in satoshis we will be contributing to the channel.
+	pub our_funding_satoshis: u64,
+	/// The amount in satoshis our counterparty will be contributing to the channel.
+	pub their_funding_satoshis: u64,
+	/// The funding transaction locktime suggested by the initiator. If set by us, it is always set
+	/// to the current block height to align incentives against fee-sniping.
+	pub funding_tx_locktime: u32,
+	/// The feerate set by the initiator to be used for the funding transaction.
+	pub funding_feerate_sat_per_1000_weight: u32,
+}
+
// Holder designates channel data owned for the benefit of the user client.
// Counterparty designates channel data owned by the another channel participant entity.
pub(super) struct Channel<SP: Deref> where SP::Target: SignerProvider {
pub context: ChannelContext<SP>,
+ #[cfg(any(dual_funding, splicing))]
+ pub dual_funding_channel_context: Option<DualFundingChannelContext>,
}
#[cfg(any(test, fuzzing))]
where L::Target: Logger {
// Assert that we'll add the HTLC claim to the holding cell in `get_update_fulfill_htlc`
// (see equivalent if condition there).
- assert!(self.context.channel_state.should_force_holding_cell());
+ assert!(!self.context.channel_state.can_generate_new_commitment());
let mon_update_id = self.context.latest_monitor_update_id; // Forget the ChannelMonitor update
let fulfill_resp = self.get_update_fulfill_htlc(htlc_id_arg, payment_preimage_arg, logger);
self.context.latest_monitor_update_id = mon_update_id;
updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
payment_preimage: payment_preimage_arg.clone(),
}],
+ channel_id: Some(self.context.channel_id()),
};
- if self.context.channel_state.should_force_holding_cell() {
+ if !self.context.channel_state.can_generate_new_commitment() {
// Note that this condition is the same as the assertion in
// `claim_htlc_while_disconnected_dropping_mon_update` and must match exactly -
// `claim_htlc_while_disconnected_dropping_mon_update` would not work correctly if we
return Ok(None);
}
- if self.context.channel_state.should_force_holding_cell() {
+ if !self.context.channel_state.can_generate_new_commitment() {
debug_assert!(force_holding_cell, "!force_holding_cell is only called when emptying the holding cell, so we shouldn't end up back in it!");
force_holding_cell = true;
}
let mut check_reconnection = false;
match &self.context.channel_state {
ChannelState::AwaitingChannelReady(flags) => {
- let flags = *flags & !FundedStateFlags::ALL;
+ let flags = flags.clone().clear(FundedStateFlags::ALL.into());
debug_assert!(!flags.is_set(AwaitingChannelReadyFlags::OUR_CHANNEL_READY) || !flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH));
- if flags & !AwaitingChannelReadyFlags::WAITING_FOR_BATCH == AwaitingChannelReadyFlags::THEIR_CHANNEL_READY {
+ if flags.clone().clear(AwaitingChannelReadyFlags::WAITING_FOR_BATCH) == AwaitingChannelReadyFlags::THEIR_CHANNEL_READY {
// If we reconnected before sending our `channel_ready` they may still resend theirs.
check_reconnection = true;
- } else if (flags & !AwaitingChannelReadyFlags::WAITING_FOR_BATCH).is_empty() {
+ } else if flags.clone().clear(AwaitingChannelReadyFlags::WAITING_FOR_BATCH).is_empty() {
self.context.channel_state.set_their_channel_ready();
} else if flags == AwaitingChannelReadyFlags::OUR_CHANNEL_READY {
self.context.channel_state = ChannelState::ChannelReady(self.context.channel_state.with_funded_state_flags_mask().into());
log_info!(logger, "Received channel_ready from peer for channel {}", &self.context.channel_id());
- Ok(self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block.height(), logger))
+ Ok(self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block.height, logger))
}
- pub fn update_add_htlc<F, FE: Deref, L: Deref>(
- &mut self, msg: &msgs::UpdateAddHTLC, mut pending_forward_status: PendingHTLCStatus,
- create_pending_htlc_status: F, fee_estimator: &LowerBoundedFeeEstimator<FE>, logger: &L
- ) -> Result<(), ChannelError>
- where F: for<'a> Fn(&'a Self, PendingHTLCStatus, u16) -> PendingHTLCStatus,
- FE::Target: FeeEstimator, L::Target: Logger,
- {
+ pub fn update_add_htlc(
+ &mut self, msg: &msgs::UpdateAddHTLC, pending_forward_status: PendingHTLCStatus,
+ ) -> Result<(), ChannelError> {
if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state".to_owned()));
}
- // We can't accept HTLCs sent after we've sent a shutdown.
- if self.context.channel_state.is_local_shutdown_sent() {
- pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x4000|8);
- }
// If the remote has sent a shutdown prior to adding this HTLC, then they are in violation of the spec.
if self.context.channel_state.is_remote_shutdown_sent() {
return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state".to_owned()));
}
let inbound_stats = self.context.get_inbound_pending_htlc_stats(None);
- let outbound_stats = self.context.get_outbound_pending_htlc_stats(None);
if inbound_stats.pending_htlcs + 1 > self.context.holder_max_accepted_htlcs as u32 {
return Err(ChannelError::Close(format!("Remote tried to push more than our max accepted HTLCs ({})", self.context.holder_max_accepted_htlcs)));
}
}
}
- let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
- let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
- (0, 0)
- } else {
- let dust_buffer_feerate = self.context.get_dust_buffer_feerate(None) as u64;
- (dust_buffer_feerate * htlc_timeout_tx_weight(self.context.get_channel_type()) / 1000,
- dust_buffer_feerate * htlc_success_tx_weight(self.context.get_channel_type()) / 1000)
- };
- let exposure_dust_limit_timeout_sats = htlc_timeout_dust_limit + self.context.counterparty_dust_limit_satoshis;
- if msg.amount_msat / 1000 < exposure_dust_limit_timeout_sats {
- let on_counterparty_tx_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat + msg.amount_msat;
- if on_counterparty_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
- log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx",
- on_counterparty_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
- pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
- }
- }
-
- let exposure_dust_limit_success_sats = htlc_success_dust_limit + self.context.holder_dust_limit_satoshis;
- if msg.amount_msat / 1000 < exposure_dust_limit_success_sats {
- let on_holder_tx_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat + msg.amount_msat;
- if on_holder_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
- log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx",
- on_holder_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
- pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
- }
- }
-
let pending_value_to_self_msat =
self.context.value_to_self_msat + inbound_stats.pending_htlcs_value_msat - removed_outbound_total_msat;
let pending_remote_value_msat =
} else {
0
};
- if !self.context.is_outbound() {
- // `Some(())` is for the fee spike buffer we keep for the remote. This deviates from
- // the spec because the fee spike buffer requirement doesn't exist on the receiver's
- // side, only on the sender's. Note that with anchor outputs we are no longer as
- // sensitive to fee spikes, so we need to account for them.
- let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
- let mut remote_fee_cost_incl_stuck_buffer_msat = self.context.next_remote_commit_tx_fee_msat(htlc_candidate, Some(()));
- if !self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
- remote_fee_cost_incl_stuck_buffer_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
- }
- if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(self.context.holder_selected_channel_reserve_satoshis * 1000).saturating_sub(anchor_outputs_value_msat) < remote_fee_cost_incl_stuck_buffer_msat {
- // Note that if the pending_forward_status is not updated here, then it's because we're already failing
- // the HTLC, i.e. its status is already set to failing.
- log_info!(logger, "Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", &self.context.channel_id());
- pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
- }
- } else {
+ if self.context.is_outbound() {
// Check that they won't violate our local required channel reserve by adding this HTLC.
let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
let local_commit_tx_fee_msat = self.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
amount_msat: msg.amount_msat,
payment_hash: msg.payment_hash,
cltv_expiry: msg.cltv_expiry,
- state: InboundHTLCState::RemoteAnnounced(pending_forward_status),
+ state: InboundHTLCState::RemoteAnnounced(InboundHTLCResolution::Resolved {
+ pending_htlc_status: pending_forward_status
+ }),
});
Ok(())
}
Err(ChannelError::Close("Remote tried to fulfill/fail an HTLC we couldn't find".to_owned()))
}
- pub fn update_fulfill_htlc(&mut self, msg: &msgs::UpdateFulfillHTLC) -> Result<(HTLCSource, u64), ChannelError> {
+ pub fn update_fulfill_htlc(&mut self, msg: &msgs::UpdateFulfillHTLC) -> Result<(HTLCSource, u64, Option<u64>), ChannelError> {
if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
return Err(ChannelError::Close("Got fulfill HTLC message when channel was not in an operational state".to_owned()));
}
return Err(ChannelError::Close("Peer sent update_fulfill_htlc when we needed a channel_reestablish".to_owned()));
}
- self.mark_outbound_htlc_removed(msg.htlc_id, Some(msg.payment_preimage), None).map(|htlc| (htlc.source.clone(), htlc.amount_msat))
+ self.mark_outbound_htlc_removed(msg.htlc_id, Some(msg.payment_preimage), None).map(|htlc| (htlc.source.clone(), htlc.amount_msat, htlc.skimmed_fee_msat))
}
pub fn update_fail_htlc(&mut self, msg: &msgs::UpdateFailHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
}
for htlc in self.context.pending_inbound_htlcs.iter_mut() {
- let new_forward = if let &InboundHTLCState::RemoteAnnounced(ref forward_info) = &htlc.state {
- Some(forward_info.clone())
+ let htlc_resolution = if let &InboundHTLCState::RemoteAnnounced(ref resolution) = &htlc.state {
+ Some(resolution.clone())
} else { None };
- if let Some(forward_info) = new_forward {
+ if let Some(htlc_resolution) = htlc_resolution {
log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToAnnounce due to commitment_signed in channel {}.",
&htlc.payment_hash, &self.context.channel_id);
- htlc.state = InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info);
+ htlc.state = InboundHTLCState::AwaitingRemoteRevokeToAnnounce(htlc_resolution);
need_commitment = true;
}
}
htlc_outputs: htlcs_and_sigs,
claimed_htlcs,
nondust_htlc_sources,
- }]
+ }],
+ channel_id: Some(self.context.channel_id()),
};
self.context.cur_holder_commitment_transaction_number -= 1;
) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>)
where F::Target: FeeEstimator, L::Target: Logger
{
- if matches!(self.context.channel_state, ChannelState::ChannelReady(_)) && !self.context.channel_state.should_force_holding_cell() {
+ if matches!(self.context.channel_state, ChannelState::ChannelReady(_)) && self.context.channel_state.can_generate_new_commitment() {
self.free_holding_cell_htlcs(fee_estimator, logger)
} else { (None, Vec::new()) }
}
update_id: self.context.latest_monitor_update_id + 1, // We don't increment this yet!
counterparty_node_id: Some(self.context.counterparty_node_id),
updates: Vec::new(),
+ channel_id: Some(self.context.channel_id()),
};
let mut htlc_updates = Vec::new();
idx: self.context.cur_counterparty_commitment_transaction_number + 1,
secret: msg.per_commitment_secret,
}],
+ channel_id: Some(self.context.channel_id()),
};
// Update state now that we've passed all the can-fail calls...
log_trace!(logger, "Updating HTLCs on receipt of RAA in channel {}...", &self.context.channel_id());
let mut to_forward_infos = Vec::new();
+ let mut pending_update_adds = Vec::new();
let mut revoked_htlcs = Vec::new();
let mut finalized_claimed_htlcs = Vec::new();
let mut update_fail_htlcs = Vec::new();
let mut state = InboundHTLCState::Committed;
mem::swap(&mut state, &mut htlc.state);
- if let InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info) = state {
+ if let InboundHTLCState::AwaitingRemoteRevokeToAnnounce(resolution) = state {
log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", &htlc.payment_hash);
- htlc.state = InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info);
+ htlc.state = InboundHTLCState::AwaitingAnnouncedRemoteRevoke(resolution);
require_commitment = true;
- } else if let InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info) = state {
- match forward_info {
- PendingHTLCStatus::Fail(fail_msg) => {
- log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to LocalRemoved due to PendingHTLCStatus indicating failure", &htlc.payment_hash);
- require_commitment = true;
- match fail_msg {
- HTLCFailureMsg::Relay(msg) => {
- htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(msg.reason.clone()));
- update_fail_htlcs.push(msg)
- },
- HTLCFailureMsg::Malformed(msg) => {
- htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailMalformed((msg.sha256_of_onion, msg.failure_code)));
- update_fail_malformed_htlcs.push(msg)
+ } else if let InboundHTLCState::AwaitingAnnouncedRemoteRevoke(resolution) = state {
+ match resolution {
+ InboundHTLCResolution::Resolved { pending_htlc_status } =>
+ match pending_htlc_status {
+ PendingHTLCStatus::Fail(fail_msg) => {
+ log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to LocalRemoved due to PendingHTLCStatus indicating failure", &htlc.payment_hash);
+ require_commitment = true;
+ match fail_msg {
+ HTLCFailureMsg::Relay(msg) => {
+ htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(msg.reason.clone()));
+ update_fail_htlcs.push(msg)
+ },
+ HTLCFailureMsg::Malformed(msg) => {
+ htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailMalformed((msg.sha256_of_onion, msg.failure_code)));
+ update_fail_malformed_htlcs.push(msg)
+ },
+ }
},
+ PendingHTLCStatus::Forward(forward_info) => {
+ log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to Committed, attempting to forward", &htlc.payment_hash);
+ to_forward_infos.push((forward_info, htlc.htlc_id));
+ htlc.state = InboundHTLCState::Committed;
+ }
}
- },
- PendingHTLCStatus::Forward(forward_info) => {
+ InboundHTLCResolution::Pending { update_add_htlc } => {
log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to Committed", &htlc.payment_hash);
- to_forward_infos.push((forward_info, htlc.htlc_id));
+ pending_update_adds.push(update_add_htlc);
htlc.state = InboundHTLCState::Committed;
}
}
}
}
+ self.context.monitor_pending_update_adds.append(&mut pending_update_adds);
+
if self.context.channel_state.is_monitor_update_in_progress() {
// We can't actually generate a new commitment transaction (incl by freeing holding
// cells) while we can't update the monitor, so we just return what we have.
// first received the funding_signed.
let mut funding_broadcastable =
if self.context.is_outbound() &&
- matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(flags) if !flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH)) ||
- matches!(self.context.channel_state, ChannelState::ChannelReady(_))
+ (matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(flags) if !flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH)) ||
+ matches!(self.context.channel_state, ChannelState::ChannelReady(_)))
{
self.context.funding_transaction.take()
} else { None };
mem::swap(&mut failed_htlcs, &mut self.context.monitor_pending_failures);
let mut finalized_claimed_htlcs = Vec::new();
mem::swap(&mut finalized_claimed_htlcs, &mut self.context.monitor_pending_finalized_fulfills);
+ let mut pending_update_adds = Vec::new();
+ mem::swap(&mut pending_update_adds, &mut self.context.monitor_pending_update_adds);
if self.context.channel_state.is_peer_disconnected() {
self.context.monitor_pending_revoke_and_ack = false;
self.context.monitor_pending_commitment_signed = false;
return MonitorRestoreUpdates {
raa: None, commitment_update: None, order: RAACommitmentOrder::RevokeAndACKFirst,
- accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs
+ accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, pending_update_adds,
+ funding_broadcastable, channel_ready, announcement_sigs
};
}
if commitment_update.is_some() { "a" } else { "no" }, if raa.is_some() { "an" } else { "no" },
match order { RAACommitmentOrder::CommitmentFirst => "commitment", RAACommitmentOrder::RevokeAndACKFirst => "RAA"});
MonitorRestoreUpdates {
- raa, commitment_update, order, accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs
+ raa, commitment_update, order, accepted_htlcs, failed_htlcs, finalized_claimed_htlcs,
+ pending_update_adds, funding_broadcastable, channel_ready, announcement_sigs
}
}
let shutdown_msg = self.get_outbound_shutdown();
- let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block.height(), logger);
+ let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block.height, logger);
if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(_)) {
// If we're waiting on a monitor update, we shouldn't re-send any channel_ready's.
updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
scriptpubkey: self.get_closing_scriptpubkey(),
}],
+ channel_id: Some(self.context.channel_id()),
};
self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
self.push_ret_blockable_mon_update(monitor_update)
}
}
+ let closure_reason = if self.initiated_shutdown() {
+ ClosureReason::LocallyInitiatedCooperativeClosure
+ } else {
+ ClosureReason::CounterpartyInitiatedCooperativeClosure
+ };
+
assert!(self.context.shutdown_scriptpubkey.is_some());
if let Some((last_fee, sig)) = self.context.last_sent_closing_fee {
if last_fee == msg.fee_satoshis {
let shutdown_result = ShutdownResult {
- closure_reason: ClosureReason::CooperativeClosure,
+ closure_reason,
monitor_update: None,
dropped_outbound_htlcs: Vec::new(),
unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
.map_err(|_| ChannelError::Close("External signer refused to sign closing transaction".to_owned()))?;
let (signed_tx, shutdown_result) = if $new_fee == msg.fee_satoshis {
let shutdown_result = ShutdownResult {
- closure_reason: ClosureReason::CooperativeClosure,
+ closure_reason,
monitor_update: None,
dropped_outbound_htlcs: Vec::new(),
unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
})
}
+ pub fn can_accept_incoming_htlc<F: Deref, L: Deref>(
+ &self, msg: &msgs::UpdateAddHTLC, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: L
+ ) -> Result<(), (&'static str, u16)>
+ where
+ F::Target: FeeEstimator,
+ L::Target: Logger
+ {
+ if self.context.channel_state.is_local_shutdown_sent() {
+ return Err(("Shutdown was already sent", 0x4000|8))
+ }
+
+ let inbound_stats = self.context.get_inbound_pending_htlc_stats(None);
+ let outbound_stats = self.context.get_outbound_pending_htlc_stats(None);
+ let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
+ let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
+ (0, 0)
+ } else {
+ let dust_buffer_feerate = self.context.get_dust_buffer_feerate(None) as u64;
+ (dust_buffer_feerate * htlc_timeout_tx_weight(self.context.get_channel_type()) / 1000,
+ dust_buffer_feerate * htlc_success_tx_weight(self.context.get_channel_type()) / 1000)
+ };
+ let exposure_dust_limit_timeout_sats = htlc_timeout_dust_limit + self.context.counterparty_dust_limit_satoshis;
+ if msg.amount_msat / 1000 < exposure_dust_limit_timeout_sats {
+ let on_counterparty_tx_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat + msg.amount_msat;
+ if on_counterparty_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
+ log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx",
+ on_counterparty_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
+ return Err(("Exceeded our dust exposure limit on counterparty commitment tx", 0x1000|7))
+ }
+ }
+
+ let exposure_dust_limit_success_sats = htlc_success_dust_limit + self.context.holder_dust_limit_satoshis;
+ if msg.amount_msat / 1000 < exposure_dust_limit_success_sats {
+ let on_holder_tx_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat + msg.amount_msat;
+ if on_holder_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
+ log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx",
+ on_holder_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
+ return Err(("Exceeded our dust exposure limit on holder commitment tx", 0x1000|7))
+ }
+ }
+
+ let anchor_outputs_value_msat = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
+ ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
+ } else {
+ 0
+ };
+
+ let mut removed_outbound_total_msat = 0;
+ for ref htlc in self.context.pending_outbound_htlcs.iter() {
+ if let OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_)) = htlc.state {
+ removed_outbound_total_msat += htlc.amount_msat;
+ } else if let OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) = htlc.state {
+ removed_outbound_total_msat += htlc.amount_msat;
+ }
+ }
+
+ let pending_value_to_self_msat =
+ self.context.value_to_self_msat + inbound_stats.pending_htlcs_value_msat - removed_outbound_total_msat;
+ let pending_remote_value_msat =
+ self.context.channel_value_satoshis * 1000 - pending_value_to_self_msat;
+
+ if !self.context.is_outbound() {
+ // `Some(())` is for the fee spike buffer we keep for the remote. This deviates from
+ // the spec because the fee spike buffer requirement doesn't exist on the receiver's
+ // side, only on the sender's. Note that with anchor outputs we are no longer as
+ // sensitive to fee spikes, so we need to account for them.
+ let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
+ let mut remote_fee_cost_incl_stuck_buffer_msat = self.context.next_remote_commit_tx_fee_msat(htlc_candidate, Some(()));
+ if !self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
+ remote_fee_cost_incl_stuck_buffer_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
+ }
+ if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(self.context.holder_selected_channel_reserve_satoshis * 1000).saturating_sub(anchor_outputs_value_msat) < remote_fee_cost_incl_stuck_buffer_msat {
+ log_info!(logger, "Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", &self.context.channel_id());
+ return Err(("Fee spike buffer violation", 0x1000|7));
+ }
+ }
+
+ Ok(())
+ }
+
pub fn get_cur_holder_commitment_transaction_number(&self) -> u64 {
self.context.cur_holder_commitment_transaction_number + 1
}
if !self.is_awaiting_monitor_update() { return false; }
if matches!(
self.context.channel_state, ChannelState::AwaitingChannelReady(flags)
- if (flags & !(AwaitingChannelReadyFlags::THEIR_CHANNEL_READY | FundedStateFlags::PEER_DISCONNECTED | FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS | AwaitingChannelReadyFlags::WAITING_FOR_BATCH)).is_empty()
+ if flags.clone().clear(AwaitingChannelReadyFlags::THEIR_CHANNEL_READY | FundedStateFlags::PEER_DISCONNECTED | FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS | AwaitingChannelReadyFlags::WAITING_FOR_BATCH).is_empty()
) {
// If we're not a 0conf channel, we'll be waiting on a monitor update with only
// AwaitingChannelReady set, though our peer could have sent their channel_ready.
self.context.channel_state.is_local_shutdown_sent()
}
+ /// Returns true if we initiated to shut down the channel.
+ pub fn initiated_shutdown(&self) -> bool {
+ self.context.local_initiated_shutdown.is_some()
+ }
+
/// Returns true if this channel is fully shut down. True here implies that no further actions
/// may/will be taken on this channel, and thus this object should be freed. Any future changes
/// will be handled appropriately by the chain monitor.
// Note that we don't include ChannelState::WaitingForBatch as we don't want to send
// channel_ready until the entire batch is ready.
- let need_commitment_update = if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(f) if (f & !FundedStateFlags::ALL).is_empty()) {
+ let need_commitment_update = if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(f) if f.clone().clear(FundedStateFlags::ALL.into()).is_empty()) {
self.context.channel_state.set_our_channel_ready();
true
- } else if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(f) if f & !FundedStateFlags::ALL == AwaitingChannelReadyFlags::THEIR_CHANNEL_READY) {
+ } else if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(f) if f.clone().clear(FundedStateFlags::ALL.into()) == AwaitingChannelReadyFlags::THEIR_CHANNEL_READY) {
self.context.channel_state = ChannelState::ChannelReady(self.context.channel_state.with_funded_state_flags_mask().into());
self.context.update_time_counter += 1;
true
- } else if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(f) if f & !FundedStateFlags::ALL == AwaitingChannelReadyFlags::OUR_CHANNEL_READY) {
+ } else if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(f) if f.clone().clear(FundedStateFlags::ALL.into()) == AwaitingChannelReadyFlags::OUR_CHANNEL_READY) {
// We got a reorg but not enough to trigger a force close, just ignore.
false
} else {
return Err(ChannelError::Ignore("Cannot send an HTLC while disconnected from channel counterparty".to_owned()));
}
- let need_holding_cell = self.context.channel_state.should_force_holding_cell();
+ let need_holding_cell = !self.context.channel_state.can_generate_new_commitment();
log_debug!(logger, "Pushing new outbound HTLC with hash {} for {} msat {}",
payment_hash, amount_msat,
if force_holding_cell { "into holding cell" }
feerate_per_kw: Some(counterparty_commitment_tx.feerate_per_kw()),
to_broadcaster_value_sat: Some(counterparty_commitment_tx.to_broadcaster_value_sat()),
to_countersignatory_value_sat: Some(counterparty_commitment_tx.to_countersignatory_value_sat()),
- }]
+ }],
+ channel_id: Some(self.context.channel_id()),
};
self.context.channel_state.set_awaiting_remote_revoke();
monitor_update
// From here on out, we may not fail!
self.context.target_closing_feerate_sats_per_kw = target_feerate_sats_per_kw;
self.context.channel_state.set_local_shutdown_sent();
+ self.context.local_initiated_shutdown = Some(());
self.context.update_time_counter += 1;
let monitor_update = if update_shutdown_script {
updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
scriptpubkey: self.get_closing_scriptpubkey(),
}],
+ channel_id: Some(self.context.channel_id()),
};
self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
self.push_ret_blockable_mon_update(monitor_update)
where ES::Target: EntropySource,
F::Target: FeeEstimator
{
- let holder_selected_contest_delay = config.channel_handshake_config.our_to_self_delay;
- let channel_keys_id = signer_provider.generate_channel_keys_id(false, channel_value_satoshis, user_id);
- let holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
- let pubkeys = holder_signer.pubkeys().clone();
-
- if !their_features.supports_wumbo() && channel_value_satoshis > MAX_FUNDING_SATOSHIS_NO_WUMBO {
- return Err(APIError::APIMisuseError{err: format!("funding_value must not exceed {}, it was {}", MAX_FUNDING_SATOSHIS_NO_WUMBO, channel_value_satoshis)});
- }
- if channel_value_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
- return Err(APIError::APIMisuseError{err: format!("funding_value must be smaller than the total bitcoin supply, it was {}", channel_value_satoshis)});
- }
- let channel_value_msat = channel_value_satoshis * 1000;
- if push_msat > channel_value_msat {
- return Err(APIError::APIMisuseError { err: format!("Push value ({}) was larger than channel_value ({})", push_msat, channel_value_msat) });
- }
- if holder_selected_contest_delay < BREAKDOWN_TIMEOUT {
- return Err(APIError::APIMisuseError {err: format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risks", holder_selected_contest_delay)});
- }
let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(channel_value_satoshis, config);
if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
// Protocol level safety check in place, although it should never happen because
// of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
- return Err(APIError::APIMisuseError { err: format!("Holder selected channel reserve below implemention limit dust_limit_satoshis {}", holder_selected_channel_reserve_satoshis) });
+ return Err(APIError::APIMisuseError { err: format!("Holder selected channel reserve below \
+ implemention limit dust_limit_satoshis {}", holder_selected_channel_reserve_satoshis) });
}
- let channel_type = Self::get_initial_channel_type(&config, their_features);
- debug_assert!(channel_type.is_subset(&channelmanager::provided_channel_type_features(&config)));
+ let channel_keys_id = signer_provider.generate_channel_keys_id(false, channel_value_satoshis, user_id);
+ let holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
+ let pubkeys = holder_signer.pubkeys().clone();
- let (commitment_conf_target, anchor_outputs_value_msat) = if channel_type.supports_anchors_zero_fee_htlc_tx() {
- (ConfirmationTarget::AnchorChannelFee, ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000)
- } else {
- (ConfirmationTarget::NonAnchorChannelFee, 0)
+ let chan = Self {
+ context: ChannelContext::new_for_outbound_channel(
+ fee_estimator,
+ entropy_source,
+ signer_provider,
+ counterparty_node_id,
+ their_features,
+ channel_value_satoshis,
+ push_msat,
+ user_id,
+ config,
+ current_chain_height,
+ outbound_scid_alias,
+ temporary_channel_id,
+ holder_selected_channel_reserve_satoshis,
+ channel_keys_id,
+ holder_signer,
+ pubkeys,
+ )?,
+ unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 }
};
- let commitment_feerate = fee_estimator.bounded_sat_per_1000_weight(commitment_conf_target);
-
- let value_to_self_msat = channel_value_satoshis * 1000 - push_msat;
- let commitment_tx_fee = commit_tx_fee_msat(commitment_feerate, MIN_AFFORDABLE_HTLC_COUNT, &channel_type);
- if value_to_self_msat.saturating_sub(anchor_outputs_value_msat) < commitment_tx_fee {
- return Err(APIError::APIMisuseError{ err: format!("Funding amount ({}) can't even pay fee for initial commitment transaction fee of {}.", value_to_self_msat / 1000, commitment_tx_fee / 1000) });
- }
-
- let mut secp_ctx = Secp256k1::new();
- secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
-
- let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
- match signer_provider.get_shutdown_scriptpubkey() {
- Ok(scriptpubkey) => Some(scriptpubkey),
- Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get shutdown scriptpubkey".to_owned()}),
- }
- } else { None };
-
- if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
- if !shutdown_scriptpubkey.is_compatible(&their_features) {
- return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() });
- }
- }
+ Ok(chan)
+ }
- let destination_script = match signer_provider.get_destination_script(channel_keys_id) {
- Ok(script) => script,
- Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get destination script".to_owned()}),
+ /// Only allowed after [`ChannelContext::channel_transaction_parameters`] is set.
+ fn get_funding_created_msg<L: Deref>(&mut self, logger: &L) -> Option<msgs::FundingCreated> where L::Target: Logger {
+ let counterparty_keys = self.context.build_remote_transaction_keys();
+ let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
+ let signature = match &self.context.holder_signer {
+ // TODO (taproot|arik): move match into calling method for Taproot
+ ChannelSignerType::Ecdsa(ecdsa) => {
+ ecdsa.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), Vec::new(), &self.context.secp_ctx)
+ .map(|(sig, _)| sig).ok()?
+ },
+ // TODO (taproot|arik)
+ #[cfg(taproot)]
+ _ => todo!()
};
- let temporary_channel_id = temporary_channel_id.unwrap_or_else(|| ChannelId::temporary_from_entropy_source(entropy_source));
-
- Ok(Self {
- context: ChannelContext {
- user_id,
-
- config: LegacyChannelConfig {
- options: config.channel_config.clone(),
- announced_channel: config.channel_handshake_config.announced_channel,
- commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey,
- },
-
- prev_config: None,
-
- inbound_handshake_limits_override: Some(config.channel_handshake_limits.clone()),
-
- channel_id: temporary_channel_id,
- temporary_channel_id: Some(temporary_channel_id),
- channel_state: ChannelState::NegotiatingFunding(NegotiatingFundingFlags::OUR_INIT_SENT),
- announcement_sigs_state: AnnouncementSigsState::NotSent,
- secp_ctx,
- channel_value_satoshis,
-
- latest_monitor_update_id: 0,
-
- holder_signer: ChannelSignerType::Ecdsa(holder_signer),
- shutdown_scriptpubkey,
- destination_script,
-
- cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
- cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
- value_to_self_msat,
-
- pending_inbound_htlcs: Vec::new(),
- pending_outbound_htlcs: Vec::new(),
- holding_cell_htlc_updates: Vec::new(),
- pending_update_fee: None,
- holding_cell_update_fee: None,
- next_holder_htlc_id: 0,
- next_counterparty_htlc_id: 0,
- update_time_counter: 1,
-
- resend_order: RAACommitmentOrder::CommitmentFirst,
-
- monitor_pending_channel_ready: false,
- monitor_pending_revoke_and_ack: false,
- monitor_pending_commitment_signed: false,
- monitor_pending_forwards: Vec::new(),
- monitor_pending_failures: Vec::new(),
- monitor_pending_finalized_fulfills: Vec::new(),
-
- signer_pending_commitment_update: false,
- signer_pending_funding: false,
-
- #[cfg(debug_assertions)]
- holder_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
- #[cfg(debug_assertions)]
- counterparty_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
-
- last_sent_closing_fee: None,
- pending_counterparty_closing_signed: None,
- expecting_peer_commitment_signed: false,
- closing_fee_limits: None,
- target_closing_feerate_sats_per_kw: None,
-
- funding_tx_confirmed_in: None,
- funding_tx_confirmation_height: 0,
- short_channel_id: None,
- channel_creation_height: current_chain_height,
-
- feerate_per_kw: commitment_feerate,
- counterparty_dust_limit_satoshis: 0,
- holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
- counterparty_max_htlc_value_in_flight_msat: 0,
- holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &config.channel_handshake_config),
- counterparty_selected_channel_reserve_satoshis: None, // Filled in in accept_channel
- holder_selected_channel_reserve_satoshis,
- counterparty_htlc_minimum_msat: 0,
- holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat },
- counterparty_max_accepted_htlcs: 0,
- holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS),
- minimum_depth: None, // Filled in in accept_channel
-
- counterparty_forwarding_info: None,
-
- channel_transaction_parameters: ChannelTransactionParameters {
- holder_pubkeys: pubkeys,
- holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay,
- is_outbound_from_holder: true,
- counterparty_parameters: None,
- funding_outpoint: None,
- channel_type_features: channel_type.clone()
- },
- funding_transaction: None,
- is_batch_funding: None,
-
- counterparty_cur_commitment_point: None,
- counterparty_prev_commitment_point: None,
- counterparty_node_id,
-
- counterparty_shutdown_scriptpubkey: None,
-
- commitment_secrets: CounterpartyCommitmentSecrets::new(),
-
- channel_update_status: ChannelUpdateStatus::Enabled,
- closing_signed_in_flight: false,
-
- announcement_sigs: None,
-
- #[cfg(any(test, fuzzing))]
- next_local_commitment_tx_fee_info_cached: Mutex::new(None),
- #[cfg(any(test, fuzzing))]
- next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
-
- workaround_lnd_bug_4006: None,
- sent_message_awaiting_response: None,
-
- latest_inbound_scid_alias: None,
- outbound_scid_alias,
-
- channel_pending_event_emitted: false,
- channel_ready_event_emitted: false,
-
- #[cfg(any(test, fuzzing))]
- historical_inbound_htlc_fulfills: HashSet::new(),
-
- channel_type,
- channel_keys_id,
-
- blocked_monitor_updates: Vec::new(),
- },
- unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 }
- })
- }
-
- /// Only allowed after [`ChannelContext::channel_transaction_parameters`] is set.
- fn get_funding_created_msg<L: Deref>(&mut self, logger: &L) -> Option<msgs::FundingCreated> where L::Target: Logger {
- let counterparty_keys = self.context.build_remote_transaction_keys();
- let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
- let signature = match &self.context.holder_signer {
- // TODO (taproot|arik): move match into calling method for Taproot
- ChannelSignerType::Ecdsa(ecdsa) => {
- ecdsa.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), Vec::new(), &self.context.secp_ctx)
- .map(|(sig, _)| sig).ok()?
- },
- // TODO (taproot|arik)
- #[cfg(taproot)]
- _ => todo!()
- };
-
- if self.context.signer_pending_funding {
- log_trace!(logger, "Counterparty commitment signature ready for funding_created message: clearing signer_pending_funding");
- self.context.signer_pending_funding = false;
- }
+ if self.context.signer_pending_funding {
+ log_trace!(logger, "Counterparty commitment signature ready for funding_created message: clearing signer_pending_funding");
+ self.context.signer_pending_funding = false;
+ }
Some(msgs::FundingCreated {
temporary_channel_id: self.context.temporary_channel_id.unwrap(),
// Now that we're past error-generating stuff, update our local state:
self.context.channel_state = ChannelState::FundingNegotiated;
- self.context.channel_id = funding_txo.to_channel_id();
+ self.context.channel_id = ChannelId::v1_from_funding_outpoint(funding_txo);
// If the funding transaction is a coinbase transaction, we need to set the minimum depth to 100.
// We can skip this if it is a zero-conf channel.
Ok(funding_created)
}
- fn get_initial_channel_type(config: &UserConfig, their_features: &InitFeatures) -> ChannelTypeFeatures {
- // The default channel type (ie the first one we try) depends on whether the channel is
- // public - if it is, we just go with `only_static_remotekey` as it's the only option
- // available. If it's private, we first try `scid_privacy` as it provides better privacy
- // with no other changes, and fall back to `only_static_remotekey`.
- let mut ret = ChannelTypeFeatures::only_static_remote_key();
- if !config.channel_handshake_config.announced_channel &&
- config.channel_handshake_config.negotiate_scid_privacy &&
- their_features.supports_scid_privacy() {
- ret.set_scid_privacy_required();
- }
-
- // Optionally, if the user would like to negotiate the `anchors_zero_fee_htlc_tx` option, we
- // set it now. If they don't understand it, we'll fall back to our default of
- // `only_static_remotekey`.
- if config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx &&
- their_features.supports_anchors_zero_fee_htlc_tx() {
- ret.set_anchors_zero_fee_htlc_tx_required();
- }
-
- ret
- }
-
/// If we receive an error message, it may only be a rejection of the channel type we tried,
/// not of our ability to open any channel at all. Thus, on error, we should first call this
/// and see if we get a new `OpenChannel` message, otherwise the channel is failed.
where
F::Target: FeeEstimator
{
- if !self.context.is_outbound() ||
- !matches!(
- self.context.channel_state, ChannelState::NegotiatingFunding(flags)
- if flags == NegotiatingFundingFlags::OUR_INIT_SENT
- )
- {
- return Err(());
- }
- if self.context.channel_type == ChannelTypeFeatures::only_static_remote_key() {
- // We've exhausted our options
- return Err(());
- }
- // We support opening a few different types of channels. Try removing our additional
- // features one by one until we've either arrived at our default or the counterparty has
- // accepted one.
- //
- // Due to the order below, we may not negotiate `option_anchors_zero_fee_htlc_tx` if the
- // counterparty doesn't support `option_scid_privacy`. Since `get_initial_channel_type`
- // checks whether the counterparty supports every feature, this would only happen if the
- // counterparty is advertising the feature, but rejecting channels proposing the feature for
- // whatever reason.
- if self.context.channel_type.supports_anchors_zero_fee_htlc_tx() {
- self.context.channel_type.clear_anchors_zero_fee_htlc_tx();
- self.context.feerate_per_kw = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::NonAnchorChannelFee);
- assert!(!self.context.channel_transaction_parameters.channel_type_features.supports_anchors_nonzero_fee_htlc_tx());
- } else if self.context.channel_type.supports_scid_privacy() {
- self.context.channel_type.clear_scid_privacy();
- } else {
- self.context.channel_type = ChannelTypeFeatures::only_static_remote_key();
- }
- self.context.channel_transaction_parameters.channel_type_features = self.context.channel_type.clone();
+ self.context.maybe_downgrade_channel_features(fee_estimator)?;
Ok(self.get_open_channel(chain_hash))
}
let keys = self.context.get_holder_pubkeys();
msgs::OpenChannel {
- chain_hash,
- temporary_channel_id: self.context.channel_id,
- funding_satoshis: self.context.channel_value_satoshis,
+ common_fields: msgs::CommonOpenChannelFields {
+ chain_hash,
+ temporary_channel_id: self.context.channel_id,
+ funding_satoshis: self.context.channel_value_satoshis,
+ dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
+ max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
+ htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
+ commitment_feerate_sat_per_1000_weight: self.context.feerate_per_kw as u32,
+ to_self_delay: self.context.get_holder_selected_contest_delay(),
+ max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
+ funding_pubkey: keys.funding_pubkey,
+ revocation_basepoint: keys.revocation_basepoint.to_public_key(),
+ payment_basepoint: keys.payment_point,
+ delayed_payment_basepoint: keys.delayed_payment_basepoint.to_public_key(),
+ htlc_basepoint: keys.htlc_basepoint.to_public_key(),
+ first_per_commitment_point,
+ channel_flags: if self.context.config.announced_channel {1} else {0},
+ shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
+ Some(script) => script.clone().into_inner(),
+ None => Builder::new().into_script(),
+ }),
+ channel_type: Some(self.context.channel_type.clone()),
+ },
push_msat: self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat,
- dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
- max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis,
- htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
- feerate_per_kw: self.context.feerate_per_kw as u32,
- to_self_delay: self.context.get_holder_selected_contest_delay(),
- max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
- funding_pubkey: keys.funding_pubkey,
- revocation_basepoint: keys.revocation_basepoint.to_public_key(),
- payment_point: keys.payment_point,
- delayed_payment_basepoint: keys.delayed_payment_basepoint.to_public_key(),
- htlc_basepoint: keys.htlc_basepoint.to_public_key(),
- first_per_commitment_point,
- channel_flags: if self.context.config.announced_channel {1} else {0},
- shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
- Some(script) => script.clone().into_inner(),
- None => Builder::new().into_script(),
- }),
- channel_type: Some(self.context.channel_type.clone()),
}
}
if !matches!(self.context.channel_state, ChannelState::NegotiatingFunding(flags) if flags == NegotiatingFundingFlags::OUR_INIT_SENT) {
return Err(ChannelError::Close("Got an accept_channel message at a strange time".to_owned()));
}
- if msg.dust_limit_satoshis > 21000000 * 100000000 {
- return Err(ChannelError::Close(format!("Peer never wants payout outputs? dust_limit_satoshis was {}", msg.dust_limit_satoshis)));
+ if msg.common_fields.dust_limit_satoshis > 21000000 * 100000000 {
+ return Err(ChannelError::Close(format!("Peer never wants payout outputs? dust_limit_satoshis was {}", msg.common_fields.dust_limit_satoshis)));
}
if msg.channel_reserve_satoshis > self.context.channel_value_satoshis {
return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than ({})", msg.channel_reserve_satoshis, self.context.channel_value_satoshis)));
}
- if msg.dust_limit_satoshis > self.context.holder_selected_channel_reserve_satoshis {
- return Err(ChannelError::Close(format!("Dust limit ({}) is bigger than our channel reserve ({})", msg.dust_limit_satoshis, self.context.holder_selected_channel_reserve_satoshis)));
+ if msg.common_fields.dust_limit_satoshis > self.context.holder_selected_channel_reserve_satoshis {
+ return Err(ChannelError::Close(format!("Dust limit ({}) is bigger than our channel reserve ({})", msg.common_fields.dust_limit_satoshis, self.context.holder_selected_channel_reserve_satoshis)));
}
if msg.channel_reserve_satoshis > self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis {
return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than channel value minus our reserve ({})",
msg.channel_reserve_satoshis, self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis)));
}
let full_channel_value_msat = (self.context.channel_value_satoshis - msg.channel_reserve_satoshis) * 1000;
- if msg.htlc_minimum_msat >= full_channel_value_msat {
- return Err(ChannelError::Close(format!("Minimum htlc value ({}) is full channel value ({})", msg.htlc_minimum_msat, full_channel_value_msat)));
+ if msg.common_fields.htlc_minimum_msat >= full_channel_value_msat {
+ return Err(ChannelError::Close(format!("Minimum htlc value ({}) is full channel value ({})", msg.common_fields.htlc_minimum_msat, full_channel_value_msat)));
}
let max_delay_acceptable = u16::min(peer_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
- if msg.to_self_delay > max_delay_acceptable {
- return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_delay_acceptable, msg.to_self_delay)));
+ if msg.common_fields.to_self_delay > max_delay_acceptable {
+ return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_delay_acceptable, msg.common_fields.to_self_delay)));
}
- if msg.max_accepted_htlcs < 1 {
+ if msg.common_fields.max_accepted_htlcs < 1 {
return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
}
- if msg.max_accepted_htlcs > MAX_HTLCS {
- return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", msg.max_accepted_htlcs, MAX_HTLCS)));
+ if msg.common_fields.max_accepted_htlcs > MAX_HTLCS {
+ return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", msg.common_fields.max_accepted_htlcs, MAX_HTLCS)));
}
// Now check against optional parameters as set by config...
- if msg.htlc_minimum_msat > peer_limits.max_htlc_minimum_msat {
- return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.htlc_minimum_msat, peer_limits.max_htlc_minimum_msat)));
+ if msg.common_fields.htlc_minimum_msat > peer_limits.max_htlc_minimum_msat {
+ return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.common_fields.htlc_minimum_msat, peer_limits.max_htlc_minimum_msat)));
}
- if msg.max_htlc_value_in_flight_msat < peer_limits.min_max_htlc_value_in_flight_msat {
- return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.max_htlc_value_in_flight_msat, peer_limits.min_max_htlc_value_in_flight_msat)));
+ if msg.common_fields.max_htlc_value_in_flight_msat < peer_limits.min_max_htlc_value_in_flight_msat {
+ return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.common_fields.max_htlc_value_in_flight_msat, peer_limits.min_max_htlc_value_in_flight_msat)));
}
if msg.channel_reserve_satoshis > peer_limits.max_channel_reserve_satoshis {
return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, peer_limits.max_channel_reserve_satoshis)));
}
- if msg.max_accepted_htlcs < peer_limits.min_max_accepted_htlcs {
- return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.max_accepted_htlcs, peer_limits.min_max_accepted_htlcs)));
+ if msg.common_fields.max_accepted_htlcs < peer_limits.min_max_accepted_htlcs {
+ return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.common_fields.max_accepted_htlcs, peer_limits.min_max_accepted_htlcs)));
}
- if msg.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
- return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
+ if msg.common_fields.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
+ return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.common_fields.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
}
- if msg.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
- return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
+ if msg.common_fields.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
+ return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.common_fields.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
}
- if msg.minimum_depth > peer_limits.max_minimum_depth {
- return Err(ChannelError::Close(format!("We consider the minimum depth to be unreasonably large. Expected minimum: ({}). Actual: ({})", peer_limits.max_minimum_depth, msg.minimum_depth)));
+ if msg.common_fields.minimum_depth > peer_limits.max_minimum_depth {
+ return Err(ChannelError::Close(format!("We consider the minimum depth to be unreasonably large. Expected minimum: ({}). Actual: ({})", peer_limits.max_minimum_depth, msg.common_fields.minimum_depth)));
}
- if let Some(ty) = &msg.channel_type {
+ if let Some(ty) = &msg.common_fields.channel_type {
if *ty != self.context.channel_type {
return Err(ChannelError::Close("Channel Type in accept_channel didn't match the one sent in open_channel.".to_owned()));
}
}
let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
- match &msg.shutdown_scriptpubkey {
+ match &msg.common_fields.shutdown_scriptpubkey {
&Some(ref script) => {
// Peer is signaling upfront_shutdown and has opt-out with a 0-length script. We don't enforce anything
if script.len() == 0 {
}
} else { None };
- self.context.counterparty_dust_limit_satoshis = msg.dust_limit_satoshis;
- self.context.counterparty_max_htlc_value_in_flight_msat = cmp::min(msg.max_htlc_value_in_flight_msat, self.context.channel_value_satoshis * 1000);
+ self.context.counterparty_dust_limit_satoshis = msg.common_fields.dust_limit_satoshis;
+ self.context.counterparty_max_htlc_value_in_flight_msat = cmp::min(msg.common_fields.max_htlc_value_in_flight_msat, self.context.channel_value_satoshis * 1000);
self.context.counterparty_selected_channel_reserve_satoshis = Some(msg.channel_reserve_satoshis);
- self.context.counterparty_htlc_minimum_msat = msg.htlc_minimum_msat;
- self.context.counterparty_max_accepted_htlcs = msg.max_accepted_htlcs;
+ self.context.counterparty_htlc_minimum_msat = msg.common_fields.htlc_minimum_msat;
+ self.context.counterparty_max_accepted_htlcs = msg.common_fields.max_accepted_htlcs;
if peer_limits.trust_own_funding_0conf {
- self.context.minimum_depth = Some(msg.minimum_depth);
+ self.context.minimum_depth = Some(msg.common_fields.minimum_depth);
} else {
- self.context.minimum_depth = Some(cmp::max(1, msg.minimum_depth));
+ self.context.minimum_depth = Some(cmp::max(1, msg.common_fields.minimum_depth));
}
let counterparty_pubkeys = ChannelPublicKeys {
- funding_pubkey: msg.funding_pubkey,
- revocation_basepoint: RevocationBasepoint::from(msg.revocation_basepoint),
- payment_point: msg.payment_point,
- delayed_payment_basepoint: DelayedPaymentBasepoint::from(msg.delayed_payment_basepoint),
- htlc_basepoint: HtlcBasepoint::from(msg.htlc_basepoint)
+ funding_pubkey: msg.common_fields.funding_pubkey,
+ revocation_basepoint: RevocationBasepoint::from(msg.common_fields.revocation_basepoint),
+ payment_point: msg.common_fields.payment_basepoint,
+ delayed_payment_basepoint: DelayedPaymentBasepoint::from(msg.common_fields.delayed_payment_basepoint),
+ htlc_basepoint: HtlcBasepoint::from(msg.common_fields.htlc_basepoint)
};
self.context.channel_transaction_parameters.counterparty_parameters = Some(CounterpartyChannelTransactionParameters {
- selected_contest_delay: msg.to_self_delay,
+ selected_contest_delay: msg.common_fields.to_self_delay,
pubkeys: counterparty_pubkeys,
});
- self.context.counterparty_cur_commitment_point = Some(msg.first_per_commitment_point);
+ self.context.counterparty_cur_commitment_point = Some(msg.common_fields.first_per_commitment_point);
self.context.counterparty_shutdown_scriptpubkey = counterparty_shutdown_scriptpubkey;
self.context.channel_state = ChannelState::NegotiatingFunding(
&self.context.channel_transaction_parameters,
funding_redeemscript.clone(), self.context.channel_value_satoshis,
obscure_factor,
- holder_commitment_tx, best_block, self.context.counterparty_node_id);
+ holder_commitment_tx, best_block, self.context.counterparty_node_id, self.context.channel_id());
channel_monitor.provide_initial_counterparty_commitment_tx(
counterparty_initial_bitcoin_tx.txid, Vec::new(),
self.context.cur_counterparty_commitment_transaction_number,
self.context.counterparty_cur_commitment_point.unwrap(),
counterparty_initial_commitment_tx.feerate_per_kw(),
- counterparty_initial_commitment_tx.to_broadcaster_value_sat(),
- counterparty_initial_commitment_tx.to_countersignatory_value_sat(), logger);
-
- assert!(!self.context.channel_state.is_monitor_update_in_progress()); // We have no had any monitor(s) yet to fail update!
- if self.context.is_batch_funding() {
- self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::WAITING_FOR_BATCH);
- } else {
- self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new());
- }
- self.context.cur_holder_commitment_transaction_number -= 1;
- self.context.cur_counterparty_commitment_transaction_number -= 1;
-
- log_info!(logger, "Received funding_signed from peer for channel {}", &self.context.channel_id());
-
- let mut channel = Channel { context: self.context };
-
- let need_channel_ready = channel.check_get_channel_ready(0).is_some();
- channel.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
- Ok((channel, channel_monitor))
- }
-
- /// Indicates that the signer may have some signatures for us, so we should retry if we're
- /// blocked.
- #[cfg(async_signing)]
- pub fn signer_maybe_unblocked<L: Deref>(&mut self, logger: &L) -> Option<msgs::FundingCreated> where L::Target: Logger {
- if self.context.signer_pending_funding && self.context.is_outbound() {
- log_trace!(logger, "Signer unblocked a funding_created");
- self.get_funding_created_msg(logger)
- } else { None }
- }
-}
-
-/// A not-yet-funded inbound (from counterparty) channel using V1 channel establishment.
-pub(super) struct InboundV1Channel<SP: Deref> where SP::Target: SignerProvider {
- pub context: ChannelContext<SP>,
- pub unfunded_context: UnfundedChannelContext,
-}
-
-/// Fetches the [`ChannelTypeFeatures`] that will be used for a channel built from a given
-/// [`msgs::OpenChannel`].
-pub(super) fn channel_type_from_open_channel(
- msg: &msgs::OpenChannel, their_features: &InitFeatures,
- our_supported_features: &ChannelTypeFeatures
-) -> Result<ChannelTypeFeatures, ChannelError> {
- if let Some(channel_type) = &msg.channel_type {
- if channel_type.supports_any_optional_bits() {
- return Err(ChannelError::Close("Channel Type field contained optional bits - this is not allowed".to_owned()));
- }
-
- // We only support the channel types defined by the `ChannelManager` in
- // `provided_channel_type_features`. The channel type must always support
- // `static_remote_key`.
- if !channel_type.requires_static_remote_key() {
- return Err(ChannelError::Close("Channel Type was not understood - we require static remote key".to_owned()));
- }
- // Make sure we support all of the features behind the channel type.
- if !channel_type.is_subset(our_supported_features) {
- return Err(ChannelError::Close("Channel Type contains unsupported features".to_owned()));
- }
- let announced_channel = if (msg.channel_flags & 1) == 1 { true } else { false };
- if channel_type.requires_scid_privacy() && announced_channel {
- return Err(ChannelError::Close("SCID Alias/Privacy Channel Type cannot be set on a public channel".to_owned()));
- }
- Ok(channel_type.clone())
- } else {
- let channel_type = ChannelTypeFeatures::from_init(&their_features);
- if channel_type != ChannelTypeFeatures::only_static_remote_key() {
- return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
- }
- Ok(channel_type)
- }
-}
-
-impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
- /// Creates a new channel from a remote sides' request for one.
- /// Assumes chain_hash has already been checked and corresponds with what we expect!
- pub fn new<ES: Deref, F: Deref, L: Deref>(
- fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP,
- counterparty_node_id: PublicKey, our_supported_features: &ChannelTypeFeatures,
- their_features: &InitFeatures, msg: &msgs::OpenChannel, user_id: u128, config: &UserConfig,
- current_chain_height: u32, logger: &L, is_0conf: bool,
- ) -> Result<InboundV1Channel<SP>, ChannelError>
- where ES::Target: EntropySource,
- F::Target: FeeEstimator,
- L::Target: Logger,
- {
- let logger = WithContext::from(logger, Some(counterparty_node_id), Some(msg.temporary_channel_id));
- let announced_channel = if (msg.channel_flags & 1) == 1 { true } else { false };
-
- // First check the channel type is known, failing before we do anything else if we don't
- // support this channel type.
- let channel_type = channel_type_from_open_channel(msg, their_features, our_supported_features)?;
-
- let channel_keys_id = signer_provider.generate_channel_keys_id(true, msg.funding_satoshis, user_id);
- let holder_signer = signer_provider.derive_channel_signer(msg.funding_satoshis, channel_keys_id);
- let pubkeys = holder_signer.pubkeys().clone();
- let counterparty_pubkeys = ChannelPublicKeys {
- funding_pubkey: msg.funding_pubkey,
- revocation_basepoint: RevocationBasepoint::from(msg.revocation_basepoint),
- payment_point: msg.payment_point,
- delayed_payment_basepoint: DelayedPaymentBasepoint::from(msg.delayed_payment_basepoint),
- htlc_basepoint: HtlcBasepoint::from(msg.htlc_basepoint)
- };
-
- if config.channel_handshake_config.our_to_self_delay < BREAKDOWN_TIMEOUT {
- return Err(ChannelError::Close(format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risks. It must be greater than {}", config.channel_handshake_config.our_to_self_delay, BREAKDOWN_TIMEOUT)));
- }
-
- // Check sanity of message fields:
- if msg.funding_satoshis > config.channel_handshake_limits.max_funding_satoshis {
- return Err(ChannelError::Close(format!("Per our config, funding must be at most {}. It was {}", config.channel_handshake_limits.max_funding_satoshis, msg.funding_satoshis)));
- }
- if msg.funding_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
- return Err(ChannelError::Close(format!("Funding must be smaller than the total bitcoin supply. It was {}", msg.funding_satoshis)));
- }
- if msg.channel_reserve_satoshis > msg.funding_satoshis {
- return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must be not greater than funding_satoshis: {}", msg.channel_reserve_satoshis, msg.funding_satoshis)));
- }
- let full_channel_value_msat = (msg.funding_satoshis - msg.channel_reserve_satoshis) * 1000;
- if msg.push_msat > full_channel_value_msat {
- return Err(ChannelError::Close(format!("push_msat {} was larger than channel amount minus reserve ({})", msg.push_msat, full_channel_value_msat)));
- }
- if msg.dust_limit_satoshis > msg.funding_satoshis {
- return Err(ChannelError::Close(format!("dust_limit_satoshis {} was larger than funding_satoshis {}. Peer never wants payout outputs?", msg.dust_limit_satoshis, msg.funding_satoshis)));
- }
- if msg.htlc_minimum_msat >= full_channel_value_msat {
- return Err(ChannelError::Close(format!("Minimum htlc value ({}) was larger than full channel value ({})", msg.htlc_minimum_msat, full_channel_value_msat)));
- }
- Channel::<SP>::check_remote_fee(&channel_type, fee_estimator, msg.feerate_per_kw, None, &&logger)?;
-
- let max_counterparty_selected_contest_delay = u16::min(config.channel_handshake_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
- if msg.to_self_delay > max_counterparty_selected_contest_delay {
- return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_counterparty_selected_contest_delay, msg.to_self_delay)));
- }
- if msg.max_accepted_htlcs < 1 {
- return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
- }
- if msg.max_accepted_htlcs > MAX_HTLCS {
- return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", msg.max_accepted_htlcs, MAX_HTLCS)));
- }
-
- // Now check against optional parameters as set by config...
- if msg.funding_satoshis < config.channel_handshake_limits.min_funding_satoshis {
- return Err(ChannelError::Close(format!("Funding satoshis ({}) is less than the user specified limit ({})", msg.funding_satoshis, config.channel_handshake_limits.min_funding_satoshis)));
- }
- if msg.htlc_minimum_msat > config.channel_handshake_limits.max_htlc_minimum_msat {
- return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.htlc_minimum_msat, config.channel_handshake_limits.max_htlc_minimum_msat)));
- }
- if msg.max_htlc_value_in_flight_msat < config.channel_handshake_limits.min_max_htlc_value_in_flight_msat {
- return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.max_htlc_value_in_flight_msat, config.channel_handshake_limits.min_max_htlc_value_in_flight_msat)));
- }
- if msg.channel_reserve_satoshis > config.channel_handshake_limits.max_channel_reserve_satoshis {
- return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, config.channel_handshake_limits.max_channel_reserve_satoshis)));
- }
- if msg.max_accepted_htlcs < config.channel_handshake_limits.min_max_accepted_htlcs {
- return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.max_accepted_htlcs, config.channel_handshake_limits.min_max_accepted_htlcs)));
- }
- if msg.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
- return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
- }
- if msg.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
- return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
- }
-
- // Convert things into internal flags and prep our state:
-
- if config.channel_handshake_limits.force_announced_channel_preference {
- if config.channel_handshake_config.announced_channel != announced_channel {
- return Err(ChannelError::Close("Peer tried to open channel but their announcement preference is different from ours".to_owned()));
- }
- }
-
- let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(msg.funding_satoshis, config);
- if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
- // Protocol level safety check in place, although it should never happen because
- // of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
- return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({}). dust_limit_satoshis is ({}).", holder_selected_channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
- }
- if holder_selected_channel_reserve_satoshis * 1000 >= full_channel_value_msat {
- return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({})msats. Channel value is ({} - {})msats.", holder_selected_channel_reserve_satoshis * 1000, full_channel_value_msat, msg.push_msat)));
- }
- if msg.channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
- log_debug!(logger, "channel_reserve_satoshis ({}) is smaller than our dust limit ({}). We can broadcast stale states without any risk, implying this channel is very insecure for our counterparty.",
- msg.channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS);
- }
- if holder_selected_channel_reserve_satoshis < msg.dust_limit_satoshis {
- return Err(ChannelError::Close(format!("Dust limit ({}) too high for the channel reserve we require the remote to keep ({})", msg.dust_limit_satoshis, holder_selected_channel_reserve_satoshis)));
- }
-
- // check if the funder's amount for the initial commitment tx is sufficient
- // for full fee payment plus a few HTLCs to ensure the channel will be useful.
- let anchor_outputs_value = if channel_type.supports_anchors_zero_fee_htlc_tx() {
- ANCHOR_OUTPUT_VALUE_SATOSHI * 2
- } else {
- 0
- };
- let funders_amount_msat = msg.funding_satoshis * 1000 - msg.push_msat;
- let commitment_tx_fee = commit_tx_fee_msat(msg.feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT, &channel_type) / 1000;
- if (funders_amount_msat / 1000).saturating_sub(anchor_outputs_value) < commitment_tx_fee {
- return Err(ChannelError::Close(format!("Funding amount ({} sats) can't even pay fee for initial commitment transaction fee of {} sats.", (funders_amount_msat / 1000).saturating_sub(anchor_outputs_value), commitment_tx_fee)));
- }
-
- let to_remote_satoshis = funders_amount_msat / 1000 - commitment_tx_fee - anchor_outputs_value;
- // While it's reasonable for us to not meet the channel reserve initially (if they don't
- // want to push much to us), our counterparty should always have more than our reserve.
- if to_remote_satoshis < holder_selected_channel_reserve_satoshis {
- return Err(ChannelError::Close("Insufficient funding amount for initial reserve".to_owned()));
- }
-
- let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
- match &msg.shutdown_scriptpubkey {
- &Some(ref script) => {
- // Peer is signaling upfront_shutdown and has opt-out with a 0-length script. We don't enforce anything
- if script.len() == 0 {
- None
- } else {
- if !script::is_bolt2_compliant(&script, their_features) {
- return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)))
- }
- Some(script.clone())
- }
- },
- // Peer is signaling upfront shutdown but don't opt-out with correct mechanism (a.k.a 0-length script). Peer looks buggy, we fail the channel
- &None => {
- return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out".to_owned()));
- }
- }
- } else { None };
-
- let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
- match signer_provider.get_shutdown_scriptpubkey() {
- Ok(scriptpubkey) => Some(scriptpubkey),
- Err(_) => return Err(ChannelError::Close("Failed to get upfront shutdown scriptpubkey".to_owned())),
- }
- } else { None };
-
- if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
- if !shutdown_scriptpubkey.is_compatible(&their_features) {
- return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
- }
- }
-
- let destination_script = match signer_provider.get_destination_script(channel_keys_id) {
- Ok(script) => script,
- Err(_) => return Err(ChannelError::Close("Failed to get destination script".to_owned())),
- };
-
- let mut secp_ctx = Secp256k1::new();
- secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
-
- let minimum_depth = if is_0conf {
- Some(0)
- } else {
- Some(cmp::max(config.channel_handshake_config.minimum_depth, 1))
- };
-
- let chan = Self {
- context: ChannelContext {
- user_id,
-
- config: LegacyChannelConfig {
- options: config.channel_config.clone(),
- announced_channel,
- commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey,
- },
-
- prev_config: None,
-
- inbound_handshake_limits_override: None,
-
- temporary_channel_id: Some(msg.temporary_channel_id),
- channel_id: msg.temporary_channel_id,
- channel_state: ChannelState::NegotiatingFunding(
- NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT
- ),
- announcement_sigs_state: AnnouncementSigsState::NotSent,
- secp_ctx,
-
- latest_monitor_update_id: 0,
-
- holder_signer: ChannelSignerType::Ecdsa(holder_signer),
- shutdown_scriptpubkey,
- destination_script,
-
- cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
- cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
- value_to_self_msat: msg.push_msat,
-
- pending_inbound_htlcs: Vec::new(),
- pending_outbound_htlcs: Vec::new(),
- holding_cell_htlc_updates: Vec::new(),
- pending_update_fee: None,
- holding_cell_update_fee: None,
- next_holder_htlc_id: 0,
- next_counterparty_htlc_id: 0,
- update_time_counter: 1,
-
- resend_order: RAACommitmentOrder::CommitmentFirst,
-
- monitor_pending_channel_ready: false,
- monitor_pending_revoke_and_ack: false,
- monitor_pending_commitment_signed: false,
- monitor_pending_forwards: Vec::new(),
- monitor_pending_failures: Vec::new(),
- monitor_pending_finalized_fulfills: Vec::new(),
-
- signer_pending_commitment_update: false,
- signer_pending_funding: false,
-
- #[cfg(debug_assertions)]
- holder_max_commitment_tx_output: Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)),
- #[cfg(debug_assertions)]
- counterparty_max_commitment_tx_output: Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)),
-
- last_sent_closing_fee: None,
- pending_counterparty_closing_signed: None,
- expecting_peer_commitment_signed: false,
- closing_fee_limits: None,
- target_closing_feerate_sats_per_kw: None,
-
- funding_tx_confirmed_in: None,
- funding_tx_confirmation_height: 0,
- short_channel_id: None,
- channel_creation_height: current_chain_height,
-
- feerate_per_kw: msg.feerate_per_kw,
- channel_value_satoshis: msg.funding_satoshis,
- counterparty_dust_limit_satoshis: msg.dust_limit_satoshis,
- holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
- counterparty_max_htlc_value_in_flight_msat: cmp::min(msg.max_htlc_value_in_flight_msat, msg.funding_satoshis * 1000),
- holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(msg.funding_satoshis, &config.channel_handshake_config),
- counterparty_selected_channel_reserve_satoshis: Some(msg.channel_reserve_satoshis),
- holder_selected_channel_reserve_satoshis,
- counterparty_htlc_minimum_msat: msg.htlc_minimum_msat,
- holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat },
- counterparty_max_accepted_htlcs: msg.max_accepted_htlcs,
- holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS),
- minimum_depth,
+ counterparty_initial_commitment_tx.to_broadcaster_value_sat(),
+ counterparty_initial_commitment_tx.to_countersignatory_value_sat(), logger);
- counterparty_forwarding_info: None,
+ assert!(!self.context.channel_state.is_monitor_update_in_progress()); // We have no had any monitor(s) yet to fail update!
+ if self.context.is_batch_funding() {
+ self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::WAITING_FOR_BATCH);
+ } else {
+ self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new());
+ }
+ self.context.cur_holder_commitment_transaction_number -= 1;
+ self.context.cur_counterparty_commitment_transaction_number -= 1;
- channel_transaction_parameters: ChannelTransactionParameters {
- holder_pubkeys: pubkeys,
- holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay,
- is_outbound_from_holder: false,
- counterparty_parameters: Some(CounterpartyChannelTransactionParameters {
- selected_contest_delay: msg.to_self_delay,
- pubkeys: counterparty_pubkeys,
- }),
- funding_outpoint: None,
- channel_type_features: channel_type.clone()
- },
- funding_transaction: None,
- is_batch_funding: None,
+ log_info!(logger, "Received funding_signed from peer for channel {}", &self.context.channel_id());
- counterparty_cur_commitment_point: Some(msg.first_per_commitment_point),
- counterparty_prev_commitment_point: None,
- counterparty_node_id,
+ let mut channel = Channel {
+ context: self.context,
+ #[cfg(any(dual_funding, splicing))]
+ dual_funding_channel_context: None,
+ };
- counterparty_shutdown_scriptpubkey,
+ let need_channel_ready = channel.check_get_channel_ready(0).is_some();
+ channel.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
+ Ok((channel, channel_monitor))
+ }
- commitment_secrets: CounterpartyCommitmentSecrets::new(),
+	/// Indicates that the signer may have some signatures for us, so we should retry if we're
+	/// blocked.
+	///
+	/// Returns a `funding_created` message to (re-)send iff one was pending on the signer and
+	/// we are the channel initiator (outbound); otherwise returns `None`.
+	#[cfg(async_signing)]
+	pub fn signer_maybe_unblocked<L: Deref>(&mut self, logger: &L) -> Option<msgs::FundingCreated> where L::Target: Logger {
+		if self.context.signer_pending_funding && self.context.is_outbound() {
+			log_trace!(logger, "Signer unblocked a funding_created");
+			self.get_funding_created_msg(logger)
+		} else { None }
+	}
+}
- channel_update_status: ChannelUpdateStatus::Enabled,
- closing_signed_in_flight: false,
+/// A not-yet-funded inbound (from counterparty) channel using V1 channel establishment.
+pub(super) struct InboundV1Channel<SP: Deref> where SP::Target: SignerProvider {
+	/// Channel state shared with funded channels.
+	pub context: ChannelContext<SP>,
+	/// State tracked only while the channel remains unfunded (e.g. its age in timer ticks).
+	pub unfunded_context: UnfundedChannelContext,
+}
- announcement_sigs: None,
+/// Fetches the [`ChannelTypeFeatures`] that will be used for a channel built from a given
+/// [`msgs::CommonOpenChannelFields`].
+///
+/// Returns a [`ChannelError::Close`] if the proposed type contains optional bits, lacks
+/// `static_remote_key`, includes features we don't support, or requests `scid_privacy` on a
+/// public channel; with no explicit type, only the implied `only_static_remote_key` is accepted.
+pub(super) fn channel_type_from_open_channel(
+	common_fields: &msgs::CommonOpenChannelFields, their_features: &InitFeatures,
+	our_supported_features: &ChannelTypeFeatures
+) -> Result<ChannelTypeFeatures, ChannelError> {
+	if let Some(channel_type) = &common_fields.channel_type {
+		if channel_type.supports_any_optional_bits() {
+			return Err(ChannelError::Close("Channel Type field contained optional bits - this is not allowed".to_owned()));
+		}
-				#[cfg(any(test, fuzzing))]
-				next_local_commitment_tx_fee_info_cached: Mutex::new(None),
-				#[cfg(any(test, fuzzing))]
-				next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
+		// We only support the channel types defined by the `ChannelManager` in
+		// `provided_channel_type_features`. The channel type must always support
+		// `static_remote_key`.
+		if !channel_type.requires_static_remote_key() {
+			return Err(ChannelError::Close("Channel Type was not understood - we require static remote key".to_owned()));
+		}
+		// Make sure we support all of the features behind the channel type.
+		if !channel_type.is_subset(our_supported_features) {
+			return Err(ChannelError::Close("Channel Type contains unsupported features".to_owned()));
+		}
+		// Bit 0 of `channel_flags` is `announce_channel` per BOLT 2.
+		let announced_channel = (common_fields.channel_flags & 1) == 1;
+		if channel_type.requires_scid_privacy() && announced_channel {
+			return Err(ChannelError::Close("SCID Alias/Privacy Channel Type cannot be set on a public channel".to_owned()));
+		}
+		Ok(channel_type.clone())
+	} else {
+		// No explicit channel type was given; fall back to the type implied by the peer's
+		// `init` features, which we only accept if it is exactly `static_remote_key`.
+		let channel_type = ChannelTypeFeatures::from_init(their_features);
+		if channel_type != ChannelTypeFeatures::only_static_remote_key() {
+			return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
+		}
+		Ok(channel_type)
+	}
+}
- workaround_lnd_bug_4006: None,
- sent_message_awaiting_response: None,
+impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
+ /// Creates a new channel from a remote sides' request for one.
+ /// Assumes chain_hash has already been checked and corresponds with what we expect!
+ pub fn new<ES: Deref, F: Deref, L: Deref>(
+ fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP,
+ counterparty_node_id: PublicKey, our_supported_features: &ChannelTypeFeatures,
+ their_features: &InitFeatures, msg: &msgs::OpenChannel, user_id: u128, config: &UserConfig,
+ current_chain_height: u32, logger: &L, is_0conf: bool,
+ ) -> Result<InboundV1Channel<SP>, ChannelError>
+ where ES::Target: EntropySource,
+ F::Target: FeeEstimator,
+ L::Target: Logger,
+ {
+ let logger = WithContext::from(logger, Some(counterparty_node_id), Some(msg.common_fields.temporary_channel_id));
- latest_inbound_scid_alias: None,
- outbound_scid_alias: 0,
+ // First check the channel type is known, failing before we do anything else if we don't
+ // support this channel type.
+ let channel_type = channel_type_from_open_channel(&msg.common_fields, their_features, our_supported_features)?;
- channel_pending_event_emitted: false,
- channel_ready_event_emitted: false,
+ let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(msg.common_fields.funding_satoshis, config);
+ let counterparty_pubkeys = ChannelPublicKeys {
+ funding_pubkey: msg.common_fields.funding_pubkey,
+ revocation_basepoint: RevocationBasepoint::from(msg.common_fields.revocation_basepoint),
+ payment_point: msg.common_fields.payment_basepoint,
+ delayed_payment_basepoint: DelayedPaymentBasepoint::from(msg.common_fields.delayed_payment_basepoint),
+ htlc_basepoint: HtlcBasepoint::from(msg.common_fields.htlc_basepoint)
+ };
- #[cfg(any(test, fuzzing))]
- historical_inbound_htlc_fulfills: HashSet::new(),
+ let chan = Self {
+ context: ChannelContext::new_for_inbound_channel(
+ fee_estimator,
+ entropy_source,
+ signer_provider,
+ counterparty_node_id,
+ their_features,
+ user_id,
+ config,
+ current_chain_height,
+ &&logger,
+ is_0conf,
+ 0,
+ counterparty_pubkeys,
channel_type,
- channel_keys_id,
-
- blocked_monitor_updates: Vec::new(),
- },
+ holder_selected_channel_reserve_satoshis,
+ msg.channel_reserve_satoshis,
+ msg.push_msat,
+ msg.common_fields.clone(),
+ )?,
unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 }
};
-
Ok(chan)
}
let keys = self.context.get_holder_pubkeys();
msgs::AcceptChannel {
- temporary_channel_id: self.context.channel_id,
- dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
- max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
+ common_fields: msgs::CommonAcceptChannelFields {
+ temporary_channel_id: self.context.channel_id,
+ dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
+ max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
+ htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
+ minimum_depth: self.context.minimum_depth.unwrap(),
+ to_self_delay: self.context.get_holder_selected_contest_delay(),
+ max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
+ funding_pubkey: keys.funding_pubkey,
+ revocation_basepoint: keys.revocation_basepoint.to_public_key(),
+ payment_basepoint: keys.payment_point,
+ delayed_payment_basepoint: keys.delayed_payment_basepoint.to_public_key(),
+ htlc_basepoint: keys.htlc_basepoint.to_public_key(),
+ first_per_commitment_point,
+ shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
+ Some(script) => script.clone().into_inner(),
+ None => Builder::new().into_script(),
+ }),
+ channel_type: Some(self.context.channel_type.clone()),
+ },
channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis,
- htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
- minimum_depth: self.context.minimum_depth.unwrap(),
- to_self_delay: self.context.get_holder_selected_contest_delay(),
- max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
- funding_pubkey: keys.funding_pubkey,
- revocation_basepoint: keys.revocation_basepoint.to_public_key(),
- payment_point: keys.payment_point,
- delayed_payment_basepoint: keys.delayed_payment_basepoint.to_public_key(),
- htlc_basepoint: keys.htlc_basepoint.to_public_key(),
- first_per_commitment_point,
- shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
- Some(script) => script.clone().into_inner(),
- None => Builder::new().into_script(),
- }),
- channel_type: Some(self.context.channel_type.clone()),
#[cfg(taproot)]
next_local_nonce: None,
}
// Now that we're past error-generating stuff, update our local state:
self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new());
- self.context.channel_id = funding_txo.to_channel_id();
+ self.context.channel_id = ChannelId::v1_from_funding_outpoint(funding_txo);
self.context.cur_counterparty_commitment_transaction_number -= 1;
self.context.cur_holder_commitment_transaction_number -= 1;
&self.context.channel_transaction_parameters,
funding_redeemscript.clone(), self.context.channel_value_satoshis,
obscure_factor,
- holder_commitment_tx, best_block, self.context.counterparty_node_id);
+ holder_commitment_tx, best_block, self.context.counterparty_node_id, self.context.channel_id());
channel_monitor.provide_initial_counterparty_commitment_tx(
counterparty_initial_commitment_tx.trust().txid(), Vec::new(),
self.context.cur_counterparty_commitment_transaction_number + 1,
// `ChannelMonitor`.
let mut channel = Channel {
context: self.context,
+ #[cfg(any(dual_funding, splicing))]
+ dual_funding_channel_context: None,
};
let need_channel_ready = channel.check_get_channel_ready(0).is_some();
channel.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
}
}
-const SERIALIZATION_VERSION: u8 = 3;
+// A not-yet-funded outbound (from holder) channel using V2 channel establishment.
+#[cfg(any(dual_funding, splicing))]
+pub(super) struct OutboundV2Channel<SP: Deref> where SP::Target: SignerProvider {
+	// Channel state shared with funded channels.
+	pub context: ChannelContext<SP>,
+	// State tracked only while the channel remains unfunded (e.g. its age in timer ticks).
+	pub unfunded_context: UnfundedChannelContext,
+	// NOTE(review): this field-level cfg is redundant — the whole struct is already gated
+	// on `any(dual_funding, splicing)` above.
+	#[cfg(any(dual_funding, splicing))]
+	pub dual_funding_context: DualFundingChannelContext,
+}
+
+#[cfg(any(dual_funding, splicing))]
+impl<SP: Deref> OutboundV2Channel<SP> where SP::Target: SignerProvider {
+ pub fn new<ES: Deref, F: Deref>(
+ fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP,
+ counterparty_node_id: PublicKey, their_features: &InitFeatures, funding_satoshis: u64,
+ user_id: u128, config: &UserConfig, current_chain_height: u32, outbound_scid_alias: u64,
+ funding_confirmation_target: ConfirmationTarget,
+ ) -> Result<OutboundV2Channel<SP>, APIError>
+ where ES::Target: EntropySource,
+ F::Target: FeeEstimator,
+ {
+ let channel_keys_id = signer_provider.generate_channel_keys_id(false, funding_satoshis, user_id);
+ let holder_signer = signer_provider.derive_channel_signer(funding_satoshis, channel_keys_id);
+ let pubkeys = holder_signer.pubkeys().clone();
+
+ let temporary_channel_id = Some(ChannelId::temporary_v2_from_revocation_basepoint(&pubkeys.revocation_basepoint));
+
+ let holder_selected_channel_reserve_satoshis = get_v2_channel_reserve_satoshis(
+ funding_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS);
+
+ let funding_feerate_sat_per_1000_weight = fee_estimator.bounded_sat_per_1000_weight(funding_confirmation_target);
+ let funding_tx_locktime = current_chain_height;
+
+ let chan = Self {
+ context: ChannelContext::new_for_outbound_channel(
+ fee_estimator,
+ entropy_source,
+ signer_provider,
+ counterparty_node_id,
+ their_features,
+ funding_satoshis,
+ 0,
+ user_id,
+ config,
+ current_chain_height,
+ outbound_scid_alias,
+ temporary_channel_id,
+ holder_selected_channel_reserve_satoshis,
+ channel_keys_id,
+ holder_signer,
+ pubkeys,
+ )?,
+ unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 },
+ dual_funding_context: DualFundingChannelContext {
+ our_funding_satoshis: funding_satoshis,
+ their_funding_satoshis: 0,
+ funding_tx_locktime,
+ funding_feerate_sat_per_1000_weight,
+ }
+ };
+ Ok(chan)
+ }
+
+ /// If we receive an error message, it may only be a rejection of the channel type we tried,
+ /// not of our ability to open any channel at all. Thus, on error, we should first call this
+ /// and see if we get a new `OpenChannelV2` message, otherwise the channel is failed.
+ pub(crate) fn maybe_handle_error_without_close<F: Deref>(
+ &mut self, chain_hash: ChainHash, fee_estimator: &LowerBoundedFeeEstimator<F>
+ ) -> Result<msgs::OpenChannelV2, ()>
+ where
+ F::Target: FeeEstimator
+ {
+ self.context.maybe_downgrade_channel_features(fee_estimator)?;
+ Ok(self.get_open_channel_v2(chain_hash))
+ }
+
+ pub fn get_open_channel_v2(&self, chain_hash: ChainHash) -> msgs::OpenChannelV2 {
+ if self.context.have_received_message() {
+ debug_assert!(false, "Cannot generate an open_channel2 after we've moved forward");
+ }
+
+ if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
+ debug_assert!(false, "Tried to send an open_channel2 for a channel that has already advanced");
+ }
+
+ let first_per_commitment_point = self.context.holder_signer.as_ref()
+ .get_per_commitment_point(self.context.cur_holder_commitment_transaction_number,
+ &self.context.secp_ctx);
+ let second_per_commitment_point = self.context.holder_signer.as_ref()
+ .get_per_commitment_point(self.context.cur_holder_commitment_transaction_number - 1,
+ &self.context.secp_ctx);
+ let keys = self.context.get_holder_pubkeys();
+
+ msgs::OpenChannelV2 {
+ common_fields: msgs::CommonOpenChannelFields {
+ chain_hash,
+ temporary_channel_id: self.context.temporary_channel_id.unwrap(),
+ funding_satoshis: self.context.channel_value_satoshis,
+ dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
+ max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
+ htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
+ commitment_feerate_sat_per_1000_weight: self.context.feerate_per_kw,
+ to_self_delay: self.context.get_holder_selected_contest_delay(),
+ max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
+ funding_pubkey: keys.funding_pubkey,
+ revocation_basepoint: keys.revocation_basepoint.to_public_key(),
+ payment_basepoint: keys.payment_point,
+ delayed_payment_basepoint: keys.delayed_payment_basepoint.to_public_key(),
+ htlc_basepoint: keys.htlc_basepoint.to_public_key(),
+ first_per_commitment_point,
+ channel_flags: if self.context.config.announced_channel {1} else {0},
+ shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
+ Some(script) => script.clone().into_inner(),
+ None => Builder::new().into_script(),
+ }),
+ channel_type: Some(self.context.channel_type.clone()),
+ },
+ funding_feerate_sat_per_1000_weight: self.context.feerate_per_kw,
+ second_per_commitment_point,
+ locktime: self.dual_funding_context.funding_tx_locktime,
+ require_confirmed_inputs: None,
+ }
+ }
+}
+
+// A not-yet-funded inbound (from counterparty) channel using V2 channel establishment.
+#[cfg(any(dual_funding, splicing))]
+pub(super) struct InboundV2Channel<SP: Deref> where SP::Target: SignerProvider {
+ pub context: ChannelContext<SP>,
+ pub unfunded_context: UnfundedChannelContext,
+ pub dual_funding_context: DualFundingChannelContext,
+}
+
+#[cfg(any(dual_funding, splicing))]
+impl<SP: Deref> InboundV2Channel<SP> where SP::Target: SignerProvider {
+ /// Creates a new dual-funded channel from a remote side's request for one.
+ /// Assumes chain_hash has already been checked and corresponds with what we expect!
+ pub fn new<ES: Deref, F: Deref, L: Deref>(
+ fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP,
+ counterparty_node_id: PublicKey, our_supported_features: &ChannelTypeFeatures,
+ their_features: &InitFeatures, msg: &msgs::OpenChannelV2, funding_satoshis: u64, user_id: u128,
+ config: &UserConfig, current_chain_height: u32, logger: &L,
+ ) -> Result<InboundV2Channel<SP>, ChannelError>
+ where ES::Target: EntropySource,
+ F::Target: FeeEstimator,
+ L::Target: Logger,
+ {
+ let channel_value_satoshis = funding_satoshis.saturating_add(msg.common_fields.funding_satoshis);
+ let counterparty_selected_channel_reserve_satoshis = get_v2_channel_reserve_satoshis(
+ channel_value_satoshis, msg.common_fields.dust_limit_satoshis);
+ let holder_selected_channel_reserve_satoshis = get_v2_channel_reserve_satoshis(
+ channel_value_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS);
+
+ // First check the channel type is known, failing before we do anything else if we don't
+ // support this channel type.
+ if msg.common_fields.channel_type.is_none() {
+ return Err(ChannelError::Close(format!("Rejecting V2 channel {} missing channel_type",
+ msg.common_fields.temporary_channel_id)))
+ }
+ let channel_type = channel_type_from_open_channel(&msg.common_fields, their_features, our_supported_features)?;
+
+ let counterparty_pubkeys = ChannelPublicKeys {
+ funding_pubkey: msg.common_fields.funding_pubkey,
+ revocation_basepoint: RevocationBasepoint(msg.common_fields.revocation_basepoint),
+ payment_point: msg.common_fields.payment_basepoint,
+ delayed_payment_basepoint: DelayedPaymentBasepoint(msg.common_fields.delayed_payment_basepoint),
+ htlc_basepoint: HtlcBasepoint(msg.common_fields.htlc_basepoint)
+ };
+
+ let mut context = ChannelContext::new_for_inbound_channel(
+ fee_estimator,
+ entropy_source,
+ signer_provider,
+ counterparty_node_id,
+ their_features,
+ user_id,
+ config,
+ current_chain_height,
+ logger,
+ false,
+
+ funding_satoshis,
+
+ counterparty_pubkeys,
+ channel_type,
+ holder_selected_channel_reserve_satoshis,
+ counterparty_selected_channel_reserve_satoshis,
+ 0 /* push_msat not used in dual-funding */,
+ msg.common_fields.clone(),
+ )?;
+ let channel_id = ChannelId::v2_from_revocation_basepoints(
+ &context.get_holder_pubkeys().revocation_basepoint,
+ &context.get_counterparty_pubkeys().revocation_basepoint);
+ context.channel_id = channel_id;
+
+ let chan = Self {
+ context,
+ unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 },
+ dual_funding_context: DualFundingChannelContext {
+ our_funding_satoshis: funding_satoshis,
+ their_funding_satoshis: msg.common_fields.funding_satoshis,
+ funding_tx_locktime: msg.locktime,
+ funding_feerate_sat_per_1000_weight: msg.funding_feerate_sat_per_1000_weight,
+ }
+ };
+
+ Ok(chan)
+ }
+
+ /// Marks an inbound channel as accepted and generates a [`msgs::AcceptChannelV2`] message which
+ /// should be sent back to the counterparty node.
+ ///
+ /// [`msgs::AcceptChannelV2`]: crate::ln::msgs::AcceptChannelV2
+ pub fn accept_inbound_dual_funded_channel(&mut self) -> msgs::AcceptChannelV2 {
+ if self.context.is_outbound() {
+ debug_assert!(false, "Tried to send accept_channel for an outbound channel?");
+ }
+ if !matches!(
+ self.context.channel_state, ChannelState::NegotiatingFunding(flags)
+ if flags == (NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT)
+ ) {
+ debug_assert!(false, "Tried to send accept_channel2 after channel had moved forward");
+ }
+ if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
+ debug_assert!(false, "Tried to send an accept_channel2 for a channel that has already advanced");
+ }
+
+ self.generate_accept_channel_v2_message()
+ }
+
+	/// This function is used to explicitly generate a [`msgs::AcceptChannelV2`] message for an
+	/// inbound channel. If the intention is to accept an inbound channel, use
+	/// [`InboundV2Channel::accept_inbound_dual_funded_channel`] instead.
+ ///
+ /// [`msgs::AcceptChannelV2`]: crate::ln::msgs::AcceptChannelV2
+ fn generate_accept_channel_v2_message(&self) -> msgs::AcceptChannelV2 {
+ let first_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(
+ self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
+ let second_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(
+ self.context.cur_holder_commitment_transaction_number - 1, &self.context.secp_ctx);
+ let keys = self.context.get_holder_pubkeys();
+
+ msgs::AcceptChannelV2 {
+ common_fields: msgs::CommonAcceptChannelFields {
+ temporary_channel_id: self.context.temporary_channel_id.unwrap(),
+ dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
+ max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
+ htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
+ minimum_depth: self.context.minimum_depth.unwrap(),
+ to_self_delay: self.context.get_holder_selected_contest_delay(),
+ max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
+ funding_pubkey: keys.funding_pubkey,
+ revocation_basepoint: keys.revocation_basepoint.to_public_key(),
+ payment_basepoint: keys.payment_point,
+ delayed_payment_basepoint: keys.delayed_payment_basepoint.to_public_key(),
+ htlc_basepoint: keys.htlc_basepoint.to_public_key(),
+ first_per_commitment_point,
+ shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
+ Some(script) => script.clone().into_inner(),
+ None => Builder::new().into_script(),
+ }),
+ channel_type: Some(self.context.channel_type.clone()),
+ },
+ funding_satoshis: self.dual_funding_context.our_funding_satoshis,
+ second_per_commitment_point,
+ require_confirmed_inputs: None,
+ }
+ }
+
+ /// Enables the possibility for tests to extract a [`msgs::AcceptChannelV2`] message for an
+ /// inbound channel without accepting it.
+ ///
+ /// [`msgs::AcceptChannelV2`]: crate::ln::msgs::AcceptChannelV2
+ #[cfg(test)]
+ pub fn get_accept_channel_v2_message(&self) -> msgs::AcceptChannelV2 {
+ self.generate_accept_channel_v2_message()
+ }
+}
+
+// Unfunded channel utilities
+
+// Returns the `ChannelTypeFeatures` we should initially propose when opening a channel,
+// based on our `UserConfig` and the counterparty's advertised `init` features.
+fn get_initial_channel_type(config: &UserConfig, their_features: &InitFeatures) -> ChannelTypeFeatures {
+	// The default channel type (ie the first one we try) depends on whether the channel is
+	// public - if it is, we just go with `only_static_remotekey` as it's the only option
+	// available. If it's private, we first try `scid_privacy` as it provides better privacy
+	// with no other changes, and fall back to `only_static_remotekey`.
+	let mut ret = ChannelTypeFeatures::only_static_remote_key();
+	if !config.channel_handshake_config.announced_channel &&
+		config.channel_handshake_config.negotiate_scid_privacy &&
+		their_features.supports_scid_privacy() {
+		ret.set_scid_privacy_required();
+	}
+
+	// Optionally, if the user would like to negotiate the `anchors_zero_fee_htlc_tx` option, we
+	// set it now. If they don't understand it, we'll fall back to our default of
+	// `only_static_remotekey`.
+	if config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx &&
+		their_features.supports_anchors_zero_fee_htlc_tx() {
+		ret.set_anchors_zero_fee_htlc_tx_required();
+	}
+
+	ret
+}
+
+const SERIALIZATION_VERSION: u8 = 4;
const MIN_SERIALIZATION_VERSION: u8 = 3;
impl_writeable_tlv_based_enum!(InboundHTLCRemovalReason,;
// Note that we write out as if remove_uncommitted_htlcs_and_mark_paused had just been
// called.
- write_ver_prefix!(writer, MIN_SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION);
+ let version_to_write = if self.context.pending_inbound_htlcs.iter().any(|htlc| match htlc.state {
+ InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref htlc_resolution)|
+ InboundHTLCState::AwaitingAnnouncedRemoteRevoke(ref htlc_resolution) => {
+ matches!(htlc_resolution, InboundHTLCResolution::Pending { .. })
+ },
+ _ => false,
+ }) {
+ SERIALIZATION_VERSION
+ } else {
+ MIN_SERIALIZATION_VERSION
+ };
+ write_ver_prefix!(writer, version_to_write, MIN_SERIALIZATION_VERSION);
// `user_id` used to be a single u64 value. In order to remain backwards compatible with
// versions prior to 0.0.113, the u128 is serialized as two separate u64 values. We write
let mut channel_state = self.context.channel_state;
if matches!(channel_state, ChannelState::AwaitingChannelReady(_)|ChannelState::ChannelReady(_)) {
channel_state.set_peer_disconnected();
+ } else {
+ debug_assert!(false, "Pre-funded/shutdown channels should not be written");
}
channel_state.to_u32().write(writer)?;
}
htlc.payment_hash.write(writer)?;
match &htlc.state {
&InboundHTLCState::RemoteAnnounced(_) => unreachable!(),
- &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref htlc_state) => {
+ &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref htlc_resolution) => {
1u8.write(writer)?;
- htlc_state.write(writer)?;
+ if version_to_write <= 3 {
+ if let InboundHTLCResolution::Resolved { pending_htlc_status } = htlc_resolution {
+ pending_htlc_status.write(writer)?;
+ } else {
+ panic!();
+ }
+ } else {
+ htlc_resolution.write(writer)?;
+ }
},
- &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(ref htlc_state) => {
+ &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(ref htlc_resolution) => {
2u8.write(writer)?;
- htlc_state.write(writer)?;
+ if version_to_write <= 3 {
+ if let InboundHTLCResolution::Resolved { pending_htlc_status } = htlc_resolution {
+ pending_htlc_status.write(writer)?;
+ } else {
+ panic!();
+ }
+ } else {
+ htlc_resolution.write(writer)?;
+ }
},
&InboundHTLCState::Committed => {
3u8.write(writer)?;
let holder_max_accepted_htlcs = if self.context.holder_max_accepted_htlcs == DEFAULT_MAX_HTLCS { None } else { Some(self.context.holder_max_accepted_htlcs) };
+ let mut monitor_pending_update_adds = None;
+ if !self.context.monitor_pending_update_adds.is_empty() {
+ monitor_pending_update_adds = Some(&self.context.monitor_pending_update_adds);
+ }
+
write_tlv_fields!(writer, {
(0, self.context.announcement_sigs, option),
// minimum_depth and counterparty_selected_channel_reserve_satoshis used to have a
(7, self.context.shutdown_scriptpubkey, option),
(8, self.context.blocked_monitor_updates, optional_vec),
(9, self.context.target_closing_feerate_sats_per_kw, option),
+ (10, monitor_pending_update_adds, option), // Added in 0.0.122
(11, self.context.monitor_pending_finalized_fulfills, required_vec),
(13, self.context.channel_creation_height, required),
(15, preimages, required_vec),
(39, pending_outbound_blinding_points, optional_vec),
(41, holding_cell_blinding_points, optional_vec),
(43, malformed_htlcs, optional_vec), // Added in 0.0.119
+ // 45 and 47 are reserved for async signing
+ (49, self.context.local_initiated_shutdown, option), // Added in 0.0.122
});
Ok(())
cltv_expiry: Readable::read(reader)?,
payment_hash: Readable::read(reader)?,
state: match <u8 as Readable>::read(reader)? {
- 1 => InboundHTLCState::AwaitingRemoteRevokeToAnnounce(Readable::read(reader)?),
- 2 => InboundHTLCState::AwaitingAnnouncedRemoteRevoke(Readable::read(reader)?),
+ 1 => {
+ let resolution = if ver <= 3 {
+ InboundHTLCResolution::Resolved { pending_htlc_status: Readable::read(reader)? }
+ } else {
+ Readable::read(reader)?
+ };
+ InboundHTLCState::AwaitingRemoteRevokeToAnnounce(resolution)
+ },
+ 2 => {
+ let resolution = if ver <= 3 {
+ InboundHTLCResolution::Resolved { pending_htlc_status: Readable::read(reader)? }
+ } else {
+ Readable::read(reader)?
+ };
+ InboundHTLCState::AwaitingAnnouncedRemoteRevoke(resolution)
+ },
3 => InboundHTLCState::Committed,
4 => InboundHTLCState::LocalRemoved(Readable::read(reader)?),
_ => return Err(DecodeError::InvalidValue),
let channel_update_status = Readable::read(reader)?;
#[cfg(any(test, fuzzing))]
- let mut historical_inbound_htlc_fulfills = HashSet::new();
+ let mut historical_inbound_htlc_fulfills = new_hash_set();
#[cfg(any(test, fuzzing))]
{
let htlc_fulfills_len: u64 = Readable::read(reader)?;
let mut is_batch_funding: Option<()> = None;
+ let mut local_initiated_shutdown: Option<()> = None;
+
let mut pending_outbound_blinding_points_opt: Option<Vec<Option<PublicKey>>> = None;
let mut holding_cell_blinding_points_opt: Option<Vec<Option<PublicKey>>> = None;
let mut malformed_htlcs: Option<Vec<(u64, u16, [u8; 32])>> = None;
+ let mut monitor_pending_update_adds: Option<Vec<msgs::UpdateAddHTLC>> = None;
read_tlv_fields!(reader, {
(0, announcement_sigs, option),
(7, shutdown_scriptpubkey, option),
(8, blocked_monitor_updates, optional_vec),
(9, target_closing_feerate_sats_per_kw, option),
+ (10, monitor_pending_update_adds, option), // Added in 0.0.122
(11, monitor_pending_finalized_fulfills, optional_vec),
(13, channel_creation_height, option),
(15, preimages_opt, optional_vec),
(39, pending_outbound_blinding_points_opt, optional_vec),
(41, holding_cell_blinding_points_opt, optional_vec),
(43, malformed_htlcs, optional_vec), // Added in 0.0.119
+ // 45 and 47 are reserved for async signing
+ (49, local_initiated_shutdown, option),
});
let (channel_keys_id, holder_signer) = if let Some(channel_keys_id) = channel_keys_id {
monitor_pending_forwards,
monitor_pending_failures,
monitor_pending_finalized_fulfills: monitor_pending_finalized_fulfills.unwrap(),
+ monitor_pending_update_adds: monitor_pending_update_adds.unwrap_or(Vec::new()),
signer_pending_commitment_update: false,
signer_pending_funding: false,
channel_type: channel_type.unwrap(),
channel_keys_id,
+ local_initiated_shutdown,
+
blocked_monitor_updates: blocked_monitor_updates.unwrap(),
- }
+ },
+ #[cfg(any(dual_funding, splicing))]
+ dual_funding_channel_context: None,
})
}
}
use bitcoin::address::{WitnessProgram, WitnessVersion};
use crate::prelude::*;
+	// Sanity-check that `ChannelState`'s ordering follows the channel lifecycle:
+	// NegotiatingFunding < FundingNegotiated < AwaitingChannelReady < ChannelReady
+	// < ShutdownComplete.
+	#[test]
+	fn test_channel_state_order() {
+		use crate::ln::channel::NegotiatingFundingFlags;
+		use crate::ln::channel::AwaitingChannelReadyFlags;
+		use crate::ln::channel::ChannelReadyFlags;
+
+		assert!(ChannelState::NegotiatingFunding(NegotiatingFundingFlags::new()) < ChannelState::FundingNegotiated);
+		assert!(ChannelState::FundingNegotiated < ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new()));
+		assert!(ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new()) < ChannelState::ChannelReady(ChannelReadyFlags::new()));
+		assert!(ChannelState::ChannelReady(ChannelReadyFlags::new()) < ChannelState::ShutdownComplete);
+	}
+
struct TestFeeEstimator {
fee_est: u32
}
// same as the old fee.
fee_est.fee_est = 500;
let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
- assert_eq!(open_channel_msg.feerate_per_kw, original_fee);
+ assert_eq!(open_channel_msg.common_fields.commitment_feerate_sat_per_1000_weight, original_fee);
}
#[test]
// Node B --> Node A: accept channel, explicitly setting B's dust limit.
let mut accept_channel_msg = node_b_chan.accept_inbound_channel();
- accept_channel_msg.dust_limit_satoshis = 546;
+ accept_channel_msg.common_fields.dust_limit_satoshis = 546;
node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
node_a_chan.context.holder_dust_limit_satoshis = 1560;
// Node B --> Node A: accept channel, explicitly setting B's dust limit.
let mut accept_channel_msg = node_b_chan.accept_inbound_channel();
- accept_channel_msg.dust_limit_satoshis = 546;
+ accept_channel_msg.common_fields.dust_limit_satoshis = 546;
node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
node_a_chan.context.holder_dust_limit_satoshis = 1560;
fn blinding_point_skimmed_fee_malformed_ser() {
// Ensure that channel blinding points, skimmed fees, and malformed HTLCs are (de)serialized
// properly.
+ let logger = test_utils::TestLogger::new();
let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
let secp_ctx = Secp256k1::new();
let seed = [42; 32];
let network = Network::Testnet;
+ let best_block = BestBlock::from_network(network);
let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
let config = UserConfig::default();
let features = channelmanager::provided_init_features(&config);
- let outbound_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &features, 10000000, 100000, 42, &config, 0, 42, None).unwrap();
- let mut chan = Channel { context: outbound_chan.context };
+ let mut outbound_chan = OutboundV1Channel::<&TestKeysInterface>::new(
+ &feeest, &&keys_provider, &&keys_provider, node_b_node_id, &features, 10000000, 100000, 42, &config, 0, 42, None
+ ).unwrap();
+ let inbound_chan = InboundV1Channel::<&TestKeysInterface>::new(
+ &feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config),
+ &features, &outbound_chan.get_open_channel(ChainHash::using_genesis_block(network)), 7, &config, 0, &&logger, false
+ ).unwrap();
+ outbound_chan.accept_channel(&inbound_chan.get_accept_channel_message(), &config.channel_handshake_limits, &features).unwrap();
+ let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
+ value: 10000000, script_pubkey: outbound_chan.context.get_funding_redeemscript(),
+ }]};
+ let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
+ let funding_created = outbound_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap().unwrap();
+ let mut chan = match inbound_chan.funding_created(&funding_created, best_block, &&keys_provider, &&logger) {
+ Ok((chan, _, _)) => chan,
+ Err((_, e)) => panic!("{}", e),
+ };
let dummy_htlc_source = HTLCSource::OutboundRoute {
path: Path {
channel_type_features.set_zero_conf_required();
let mut open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
- open_channel_msg.channel_type = Some(channel_type_features);
+ open_channel_msg.common_fields.channel_type = Some(channel_type_features);
let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
let res = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider,
node_b_node_id, &channelmanager::provided_channel_type_features(&config),
// Set `channel_type` to `None` to force the implicit feature negotiation.
let mut open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
- open_channel_msg.channel_type = None;
+ open_channel_msg.common_fields.channel_type = None;
// Since A supports both `static_remote_key` and `option_anchors`, but B only accepts
// `static_remote_key`, it will fail the channel.
).unwrap();
let mut open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
- open_channel_msg.channel_type = Some(simple_anchors_channel_type.clone());
+ open_channel_msg.common_fields.channel_type = Some(simple_anchors_channel_type.clone());
let res = InboundV1Channel::<&TestKeysInterface>::new(
&fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
).unwrap();
let mut accept_channel_msg = channel_b.get_accept_channel_message();
- accept_channel_msg.channel_type = Some(simple_anchors_channel_type.clone());
+ accept_channel_msg.common_fields.channel_type = Some(simple_anchors_channel_type.clone());
let res = channel_a.accept_channel(
&accept_channel_msg, &config.channel_handshake_limits, &simple_anchors_init
//! ChannelId definition.
+use crate::chain::transaction::OutPoint;
+use crate::io;
use crate::ln::msgs::DecodeError;
use crate::sign::EntropySource;
use crate::util::ser::{Readable, Writeable, Writer};
+use super::channel_keys::RevocationBasepoint;
-use crate::io;
+use bitcoin::hashes::{
+ Hash as _,
+ HashEngine as _,
+ sha256::Hash as Sha256,
+};
use core::fmt;
use core::ops::Deref;
Self(res)
}
+ /// Create a _v1_ channel ID from a funding tx outpoint.
+ ///
+ /// Convenience wrapper around [`ChannelId::v1_from_funding_txid`], passing the outpoint's
+ /// txid bytes and output index.
+ pub fn v1_from_funding_outpoint(outpoint: OutPoint) -> Self {
+ Self::v1_from_funding_txid(outpoint.txid.as_byte_array(), outpoint.index)
+ }
+
/// Create a _temporary_ channel ID randomly, based on an entropy source.
pub fn temporary_from_entropy_source<ES: Deref>(entropy_source: &ES) -> Self
where ES::Target: EntropySource {
pub fn is_zero(&self) -> bool {
self.0[..] == [0; 32]
}
+
+ /// Create _v2_ channel ID by concatenating the holder revocation basepoint with the counterparty
+ /// revocation basepoint and hashing the result. The basepoints will be concatenated in increasing
+ /// sorted order.
+ pub fn v2_from_revocation_basepoints(
+ ours: &RevocationBasepoint,
+ theirs: &RevocationBasepoint,
+ ) -> Self {
+ let ours = ours.0.serialize();
+ let theirs = theirs.0.serialize();
+ // Compare the 33-byte compressed serializations lexicographically so the result is
+ // symmetric: swapping `ours` and `theirs` yields the same channel ID.
+ let (lesser, greater) = if ours < theirs {
+ (ours, theirs)
+ } else {
+ (theirs, ours)
+ };
+ // ID = SHA256(lesser_basepoint || greater_basepoint)
+ let mut engine = Sha256::engine();
+ engine.input(&lesser[..]);
+ engine.input(&greater[..]);
+ Self(Sha256::from_engine(engine).to_byte_array())
+ }
+
+ /// Create temporary _v2_ channel ID by concatenating a zeroed out basepoint with the holder
+ /// revocation basepoint and hashing the result, i.e. `SHA256(0x00 * 33 || our_basepoint)`.
+ pub fn temporary_v2_from_revocation_basepoint(our_revocation_basepoint: &RevocationBasepoint) -> Self {
+ Self(Sha256::hash(&[[0u8; 33], our_revocation_basepoint.0.serialize()].concat()).to_byte_array())
+ }
}
impl Writeable for ChannelId {
#[cfg(test)]
mod tests {
+ use bitcoin::hashes::{
+ Hash as _,
+ HashEngine as _,
+ hex::FromHex as _,
+ sha256::Hash as Sha256,
+ };
+ use bitcoin::secp256k1::PublicKey;
use hex::DisplayHex;
use crate::ln::ChannelId;
+ use crate::ln::channel_keys::RevocationBasepoint;
use crate::util::ser::{Readable, Writeable};
use crate::util::test_utils;
use crate::prelude::*;
let channel_id = ChannelId::v1_from_funding_txid(&[2; 32], 1);
assert_eq!(format!("{}", &channel_id), "0202020202020202020202020202020202020202020202020202020202020203");
}
+
+ #[test]
+ fn test_channel_id_v2_from_basepoints() {
+ // Ours greater than theirs
+ let ours = RevocationBasepoint(PublicKey::from_slice(&<Vec<u8>>::from_hex("0324653eac434488002cc06bbfb7f10fe18991e35f9fe4302dbea6d2353dc0ab1c").unwrap()[..]).unwrap());
+ let theirs = RevocationBasepoint(PublicKey::from_slice(&<Vec<u8>>::from_hex("02eec7245d6b7d2ccb30380bfbe2a3648cd7a942653f5aa340edcea1f283686619").unwrap()[..]).unwrap());
+
+ // Since `ours` sorts after `theirs`, the expected hash input is theirs || ours.
+ let mut engine = Sha256::engine();
+ engine.input(&theirs.0.serialize());
+ engine.input(&ours.0.serialize());
+ let expected_id = ChannelId(Sha256::from_engine(engine).to_byte_array());
+
+ assert_eq!(ChannelId::v2_from_revocation_basepoints(&ours, &theirs), expected_id);
+
+ // Theirs greater than ours
+ let ours = RevocationBasepoint(PublicKey::from_slice(&<Vec<u8>>::from_hex("027f31ebc5462c1fdce1b737ecff52d37d75dea43ce11c74d25aa297165faa2007").unwrap()[..]).unwrap());
+ let theirs = RevocationBasepoint(PublicKey::from_slice(&<Vec<u8>>::from_hex("02eec7245d6b7d2ccb30380bfbe2a3648cd7a942653f5aa340edcea1f283686619").unwrap()[..]).unwrap());
+
+ // Here `ours` sorts first, so the expected hash input is ours || theirs.
+ let mut engine = Sha256::engine();
+ engine.input(&ours.0.serialize());
+ engine.input(&theirs.0.serialize());
+ let expected_id = ChannelId(Sha256::from_engine(engine).to_byte_array());
+
+ assert_eq!(ChannelId::v2_from_revocation_basepoints(&ours, &theirs), expected_id);
+ }
}
//! Keys used to generate commitment transactions.
//! See: <https://github.com/lightning/bolts/blob/master/03-transactions.md#keys>
-use bitcoin::hashes::Hash;
-use bitcoin::hashes::HashEngine;
-use bitcoin::secp256k1::Scalar;
-use bitcoin::secp256k1::SecretKey;
-use bitcoin::secp256k1::Secp256k1;
-use bitcoin::secp256k1;
+use crate::io;
use crate::ln::msgs::DecodeError;
use crate::util::ser::Readable;
-use crate::io;
-use crate::util::ser::Writer;
use crate::util::ser::Writeable;
-use bitcoin::secp256k1::PublicKey;
+use crate::util::ser::Writer;
use bitcoin::hashes::sha256::Hash as Sha256;
+use bitcoin::hashes::Hash;
+use bitcoin::hashes::HashEngine;
+use bitcoin::secp256k1;
+use bitcoin::secp256k1::PublicKey;
+use bitcoin::secp256k1::Scalar;
+use bitcoin::secp256k1::Secp256k1;
+use bitcoin::secp256k1::SecretKey;
macro_rules! doc_comment {
($x:expr, $($tt:tt)*) => {
};
}
macro_rules! basepoint_impl {
- ($BasepointT:ty) => {
+ ($BasepointT:ty $(, $KeyName: expr)?) => {
impl $BasepointT {
/// Get inner Public Key
pub fn to_public_key(&self) -> PublicKey {
self.0
}
+
+ $(doc_comment!(
+ concat!(
+ "Derives the \"tweak\" used to calculate [`", $KeyName, "::from_basepoint`].\n",
+ "\n",
+ "[`", $KeyName, "::from_basepoint`] calculates a private key as:\n",
+ "`privkey = basepoint_secret + SHA256(per_commitment_point || basepoint)`\n",
+ "\n",
+ "This calculates the hash part in the tweak derivation process, which is used to\n",
+ "ensure that each key is unique and cannot be guessed by an external party."
+ ),
+ pub fn derive_add_tweak(&self, per_commitment_point: &PublicKey) -> Sha256 {
+ let mut sha = Sha256::engine();
+ sha.input(&per_commitment_point.serialize());
+ sha.input(&self.to_public_key().serialize());
+ Sha256::from_engine(sha)
+ });
+ )?
}
impl From<PublicKey> for $BasepointT {
Self(value)
}
}
-
- }
+ };
}
macro_rules! key_impl {
($BasepointT:ty, $KeyName:expr) => {
Ok(Self(key))
}
}
- }
+ };
}
-
-
/// Base key used in conjunction with a `per_commitment_point` to generate a [`DelayedPaymentKey`].
///
/// The delayed payment key is used to pay the commitment state broadcaster their
/// state broadcasted was previously revoked.
#[derive(PartialEq, Eq, Clone, Copy, Debug, Hash)]
pub struct DelayedPaymentBasepoint(pub PublicKey);
-basepoint_impl!(DelayedPaymentBasepoint);
+basepoint_impl!(DelayedPaymentBasepoint, "DelayedPaymentKey");
key_read_write!(DelayedPaymentBasepoint);
-
/// A derived key built from a [`DelayedPaymentBasepoint`] and `per_commitment_point`.
///
/// The delayed payment key is used to pay the commitment state broadcaster their
/// Thus, both channel counterparties' HTLC keys will appears in each HTLC output's script.
#[derive(PartialEq, Eq, Clone, Copy, Debug, Hash)]
pub struct HtlcBasepoint(pub PublicKey);
-basepoint_impl!(HtlcBasepoint);
+basepoint_impl!(HtlcBasepoint, "HtlcKey");
key_read_write!(HtlcBasepoint);
/// A derived key built from a [`HtlcBasepoint`] and `per_commitment_point`.
/// Derives a per-commitment-transaction public key (e.g. an HTLC key or a delayed_payment key)
/// from the base point and the per_commitment_key. This is the public equivalent of
/// derive_private_key - using only public keys to derive a public key instead of private keys.
-fn derive_public_key<T: secp256k1::Signing>(secp_ctx: &Secp256k1<T>, per_commitment_point: &PublicKey, base_point: &PublicKey) -> PublicKey {
+fn derive_public_key<T: secp256k1::Signing>(
+ secp_ctx: &Secp256k1<T>, per_commitment_point: &PublicKey, base_point: &PublicKey,
+) -> PublicKey {
// tweak = SHA256(per_commitment_point || base_point)
let mut sha = Sha256::engine();
sha.input(&per_commitment_point.serialize());
sha.input(&base_point.serialize());
- let res = Sha256::from_engine(sha).to_byte_array();
+ let res = Sha256::from_engine(sha);
- let hashkey = PublicKey::from_secret_key(&secp_ctx,
- &SecretKey::from_slice(&res).expect("Hashes should always be valid keys unless SHA-256 is broken"));
+ // Result is base_point + tweak*G, computed by the shared helper.
+ add_public_key_tweak(secp_ctx, base_point, &res)
+}
+
+/// Adds a tweak to a public key to derive a new public key.
+///
+/// Returns `base_point + tweak*G`, interpreting the SHA-256 digest `tweak` as a secret-key
+/// scalar.
+///
+/// May panic if `tweak` is not the output of a SHA-256 hash.
+pub fn add_public_key_tweak<T: secp256k1::Signing>(
+ secp_ctx: &Secp256k1<T>, base_point: &PublicKey, tweak: &Sha256,
+) -> PublicKey {
+ // tweak*G, via from_secret_key on the hash bytes; only fails if the hash falls outside the
+ // curve order, which is cryptographically negligible.
+ let hashkey = PublicKey::from_secret_key(
+ &secp_ctx,
+ &SecretKey::from_slice(tweak.as_byte_array())
+ .expect("Hashes should always be valid keys unless SHA-256 is broken"),
+ );
base_point.combine(&hashkey)
.expect("Addition only fails if the tweak is the inverse of the key. This is not possible when the tweak contains the hash of the key.")
}
basepoint_impl!(RevocationBasepoint);
key_read_write!(RevocationBasepoint);
-
/// The revocation key is used to allow a channel party to revoke their state - giving their
/// counterparty the required material to claim all of their funds if they broadcast that state.
///
///
/// [`chan_utils::derive_private_revocation_key`]: crate::ln::chan_utils::derive_private_revocation_key
pub fn from_basepoint<T: secp256k1::Verification>(
- secp_ctx: &Secp256k1<T>,
- countersignatory_basepoint: &RevocationBasepoint,
+ secp_ctx: &Secp256k1<T>, countersignatory_basepoint: &RevocationBasepoint,
per_commitment_point: &PublicKey,
) -> Self {
let rev_append_commit_hash_key = {
}
key_read_write!(RevocationKey);
-
#[cfg(test)]
mod test {
- use bitcoin::secp256k1::{Secp256k1, SecretKey, PublicKey};
- use bitcoin::hashes::hex::FromHex;
use super::derive_public_key;
+ use bitcoin::hashes::hex::FromHex;
+ use bitcoin::secp256k1::{PublicKey, Secp256k1, SecretKey};
#[test]
fn test_key_derivation() {
// Test vectors from BOLT 3 Appendix E:
let secp_ctx = Secp256k1::new();
- let base_secret = SecretKey::from_slice(&<Vec<u8>>::from_hex("000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f").unwrap()[..]).unwrap();
- let per_commitment_secret = SecretKey::from_slice(&<Vec<u8>>::from_hex("1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09080706050403020100").unwrap()[..]).unwrap();
+ let base_secret = SecretKey::from_slice(
+ &<Vec<u8>>::from_hex(
+ "000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f",
+ )
+ .unwrap()[..],
+ )
+ .unwrap();
+ let per_commitment_secret = SecretKey::from_slice(
+ &<Vec<u8>>::from_hex(
+ "1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09080706050403020100",
+ )
+ .unwrap()[..],
+ )
+ .unwrap();
// Sanity-check the public points against the BOLT 3 vectors before testing derivation.
let base_point = PublicKey::from_secret_key(&secp_ctx, &base_secret);
- assert_eq!(base_point.serialize()[..], <Vec<u8>>::from_hex("036d6caac248af96f6afa7f904f550253a0f3ef3f5aa2fe6838a95b216691468e2").unwrap()[..]);
+ assert_eq!(
+ base_point.serialize()[..],
+ <Vec<u8>>::from_hex(
+ "036d6caac248af96f6afa7f904f550253a0f3ef3f5aa2fe6838a95b216691468e2"
+ )
+ .unwrap()[..]
+ );
let per_commitment_point = PublicKey::from_secret_key(&secp_ctx, &per_commitment_secret);
- assert_eq!(per_commitment_point.serialize()[..], <Vec<u8>>::from_hex("025f7117a78150fe2ef97db7cfc83bd57b2e2c0d0dd25eaf467a4a1c2a45ce1486").unwrap()[..]);
-
- assert_eq!(derive_public_key(&secp_ctx, &per_commitment_point, &base_point).serialize()[..],
- <Vec<u8>>::from_hex("0235f2dbfaa89b57ec7b055afe29849ef7ddfeb1cefdb9ebdc43f5494984db29e5").unwrap()[..]);
+ assert_eq!(
+ per_commitment_point.serialize()[..],
+ <Vec<u8>>::from_hex(
+ "025f7117a78150fe2ef97db7cfc83bd57b2e2c0d0dd25eaf467a4a1c2a45ce1486"
+ )
+ .unwrap()[..]
+ );
+
+ // derive_public_key = base_point + SHA256(per_commitment_point || base_point)*G, checked
+ // against the BOLT 3 expected pubkey.
+ assert_eq!(
+ derive_public_key(&secp_ctx, &per_commitment_point, &base_point).serialize()[..],
+ <Vec<u8>>::from_hex(
+ "0235f2dbfaa89b57ec7b055afe29849ef7ddfeb1cefdb9ebdc43f5494984db29e5"
+ )
+ .unwrap()[..]
+ );
}
}
use bitcoin::secp256k1::Secp256k1;
use bitcoin::{secp256k1, Sequence};
-use crate::blinded_path::BlindedPath;
-use crate::blinded_path::payment::{PaymentConstraints, ReceiveTlvs};
+use crate::blinded_path::{BlindedPath, NodeIdLookUp};
+use crate::blinded_path::payment::{Bolt12OfferContext, Bolt12RefundContext, PaymentConstraints, PaymentContext, ReceiveTlvs};
use crate::chain;
use crate::chain::{Confirm, ChannelMonitorUpdateStatus, Watch, BestBlock};
use crate::chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator, LowerBoundedFeeEstimator};
// construct one themselves.
use crate::ln::{inbound_payment, ChannelId, PaymentHash, PaymentPreimage, PaymentSecret};
use crate::ln::channel::{self, Channel, ChannelPhase, ChannelContext, ChannelError, ChannelUpdateStatus, ShutdownResult, UnfundedChannelContext, UpdateFulfillCommitFetch, OutboundV1Channel, InboundV1Channel, WithChannelContext};
+pub use crate::ln::channel::{InboundHTLCDetails, InboundHTLCStateDetails, OutboundHTLCDetails, OutboundHTLCStateDetails};
use crate::ln::features::{Bolt12InvoiceFeatures, ChannelFeatures, ChannelTypeFeatures, InitFeatures, NodeFeatures};
#[cfg(any(feature = "_test_utils", test))]
use crate::ln::features::Bolt11InvoiceFeatures;
use crate::ln::outbound_payment;
use crate::ln::outbound_payment::{Bolt12PaymentError, OutboundPayments, PaymentAttempts, PendingOutboundPayment, SendAlongPathArgs, StaleExpiration};
use crate::ln::wire::Encode;
-use crate::offers::invoice::{BlindedPayInfo, Bolt12Invoice, DEFAULT_RELATIVE_EXPIRY, DerivedSigningPubkey, InvoiceBuilder};
+use crate::offers::invoice::{BlindedPayInfo, Bolt12Invoice, DEFAULT_RELATIVE_EXPIRY, DerivedSigningPubkey, ExplicitSigningPubkey, InvoiceBuilder, UnsignedBolt12Invoice};
use crate::offers::invoice_error::InvoiceError;
-use crate::offers::merkle::SignError;
-use crate::offers::offer::{DerivedMetadata, Offer, OfferBuilder};
+use crate::offers::invoice_request::{DerivedPayerId, InvoiceRequestBuilder};
+use crate::offers::offer::{Offer, OfferBuilder};
use crate::offers::parse::Bolt12SemanticError;
use crate::offers::refund::{Refund, RefundBuilder};
use crate::onion_message::messenger::{Destination, MessageRouter, PendingOnionMessage, new_pending_onion_message};
use crate::util::errors::APIError;
#[cfg(not(c_bindings))]
use {
+ crate::offers::offer::DerivedMetadata,
crate::routing::router::DefaultRouter,
crate::routing::gossip::NetworkGraph,
crate::routing::scoring::{ProbabilisticScorer, ProbabilisticScoringFeeParameters},
crate::sign::KeysManager,
};
+#[cfg(c_bindings)]
+use {
+ crate::offers::offer::OfferWithDerivedMetadataBuilder,
+ crate::offers::refund::RefundMaybeWithDerivedMetadataBuilder,
+};
use alloc::collections::{btree_map, BTreeMap};
/// [`Event::PaymentClaimable::onion_fields`] as
/// [`RecipientOnionFields::payment_metadata`].
payment_metadata: Option<Vec<u8>>,
+ /// The context of the payment included by the recipient in a blinded path, or `None` if a
+ /// blinded path was not used.
+ ///
+ /// Used in part to determine the [`events::PaymentPurpose`].
+ payment_context: Option<PaymentContext>,
/// CLTV expiry of the received HTLC.
///
/// Used to track when we should expire pending HTLCs that go unclaimed.
/// For HTLCs received by LDK, these will ultimately bubble back up as
/// [`RecipientOnionFields::custom_tlvs`].
custom_tlvs: Vec<(u64, Vec<u8>)>,
+ /// Set if this HTLC is the final hop in a multi-hop blinded path.
+ requires_blinded_error: bool,
},
}
match self {
Self::Forward { blinded: Some(BlindedForward { failure, .. }), .. } => Some(*failure),
Self::Receive { requires_blinded_error: true, .. } => Some(BlindedFailure::FromBlindedNode),
+ Self::ReceiveKeysend { requires_blinded_error: true, .. } => Some(BlindedFailure::FromBlindedNode),
_ => None,
}
}
// Note that this may be an outbound SCID alias for the associated channel.
prev_short_channel_id: u64,
prev_htlc_id: u64,
+ prev_channel_id: ChannelId,
prev_funding_outpoint: OutPoint,
prev_user_channel_id: u128,
}
incoming_packet_shared_secret: [u8; 32],
phantom_shared_secret: Option<[u8; 32]>,
blinded_failure: Option<BlindedFailure>,
+ channel_id: ChannelId,
// This field is consumed by `claim_funds_from_hop()` when updating a force-closed backwards
// channel with a preimage provided by the forward channel.
/// This is only here for backwards-compatibility in serialization, in the future it can be
/// removed, breaking clients running 0.0.106 and earlier.
_legacy_hop_data: Option<msgs::FinalOnionHopData>,
+ /// The context of the payment included by the recipient in a blinded path, or `None` if a
+ /// blinded path was not used.
+ ///
+ /// Used in part to determine the [`events::PaymentPurpose`].
+ payment_context: Option<PaymentContext>,
},
/// Contains the payer-provided preimage.
Spontaneous(PaymentPreimage),
impl From<&ClaimableHTLC> for events::ClaimedHTLC {
fn from(val: &ClaimableHTLC) -> Self {
events::ClaimedHTLC {
- channel_id: val.prev_hop.outpoint.to_channel_id(),
+ channel_id: val.prev_hop.channel_id,
user_channel_id: val.prev_hop.user_channel_id.unwrap_or(0),
cltv_expiry: val.cltv_expiry,
value_msat: val.value,
///
/// Note that any such events are lost on shutdown, so in general they must be updates which
/// are regenerated on startup.
- ClosedMonitorUpdateRegeneratedOnStartup((OutPoint, ChannelMonitorUpdate)),
+ ClosedMonitorUpdateRegeneratedOnStartup((OutPoint, ChannelId, ChannelMonitorUpdate)),
/// Handle a ChannelMonitorUpdate which may or may not close the channel and may unblock the
/// channel to continue normal operation.
///
MonitorUpdateRegeneratedOnStartup {
counterparty_node_id: PublicKey,
funding_txo: OutPoint,
+ channel_id: ChannelId,
update: ChannelMonitorUpdate
},
/// Some [`ChannelMonitorUpdate`] (s) completed before we were serialized but we still have
/// outbound edge.
EmitEventAndFreeOtherChannel {
event: events::Event,
- downstream_counterparty_and_funding_outpoint: Option<(PublicKey, OutPoint, RAAMonitorUpdateBlockingAction)>,
+ downstream_counterparty_and_funding_outpoint: Option<(PublicKey, OutPoint, ChannelId, RAAMonitorUpdateBlockingAction)>,
},
/// Indicates we should immediately resume the operation of another channel, unless there is
/// some other reason why the channel is blocked. In practice this simply means immediately
downstream_counterparty_node_id: PublicKey,
downstream_funding_outpoint: OutPoint,
blocking_action: RAAMonitorUpdateBlockingAction,
+ downstream_channel_id: ChannelId,
},
}
(0, downstream_counterparty_node_id, required),
(2, downstream_funding_outpoint, required),
(4, blocking_action, required),
+ // Note that by the time we get past the required read above, downstream_funding_outpoint will be
+ // filled in, so we can safely unwrap it here.
+ (5, downstream_channel_id, (default_value, ChannelId::v1_from_funding_outpoint(downstream_funding_outpoint.0.unwrap()))),
},
(2, EmitEventAndFreeOtherChannel) => {
(0, event, upgradable_required),
ReleaseRAAChannelMonitorUpdate {
counterparty_node_id: PublicKey,
channel_funding_outpoint: OutPoint,
+ channel_id: ChannelId,
},
}
impl_writeable_tlv_based_enum!(EventCompletionAction,
(0, ReleaseRAAChannelMonitorUpdate) => {
(0, channel_funding_outpoint, required),
(2, counterparty_node_id, required),
+ // Note that by the time we get past the required read above, channel_funding_outpoint will be
+ // filled in, so we can safely unwrap it here.
+ (3, channel_id, (default_value, ChannelId::v1_from_funding_outpoint(channel_funding_outpoint.0.unwrap()))),
};
);
impl RAAMonitorUpdateBlockingAction {
fn from_prev_hop_data(prev_hop: &HTLCPreviousHopData) -> Self {
Self::ForwardedPaymentInboundClaim {
- channel_id: prev_hop.outpoint.to_channel_id(),
+ channel_id: prev_hop.channel_id,
htlc_id: prev_hop.htlc_id,
}
}
/// The peer is currently connected (i.e. we've seen a
/// [`ChannelMessageHandler::peer_connected`] and no corresponding
/// [`ChannelMessageHandler::peer_disconnected`]).
- is_connected: bool,
+ pub is_connected: bool,
}
impl <SP: Deref> PeerState<SP> where SP::Target: SignerProvider {
if require_disconnected && self.is_connected {
return false
}
- self.channel_by_id.iter().filter(|(_, phase)| matches!(phase, ChannelPhase::Funded(_))).count() == 0
+ !self.channel_by_id.iter().any(|(_, phase)|
+ match phase {
+ ChannelPhase::Funded(_) | ChannelPhase::UnfundedOutboundV1(_) => true,
+ ChannelPhase::UnfundedInboundV1(_) => false,
+ #[cfg(any(dual_funding, splicing))]
+ ChannelPhase::UnfundedOutboundV2(_) => true,
+ #[cfg(any(dual_funding, splicing))]
+ ChannelPhase::UnfundedInboundV2(_) => false,
+ }
+ )
&& self.monitor_update_blocked_actions.is_empty()
&& self.in_flight_monitor_updates.is_empty()
}
Arc<DefaultRouter<
Arc<NetworkGraph<Arc<L>>>,
Arc<L>,
+ Arc<KeysManager>,
Arc<RwLock<ProbabilisticScorer<Arc<NetworkGraph<Arc<L>>>, Arc<L>>>>,
ProbabilisticScoringFeeParameters,
ProbabilisticScorer<Arc<NetworkGraph<Arc<L>>>, Arc<L>>,
&'e DefaultRouter<
&'f NetworkGraph<&'g L>,
&'g L,
+ &'c KeysManager,
&'h RwLock<ProbabilisticScorer<&'f NetworkGraph<&'g L>, &'g L>>,
ProbabilisticScoringFeeParameters,
ProbabilisticScorer<&'f NetworkGraph<&'g L>, &'g L>
fn get_cm(&self) -> &ChannelManager<M, T, ES, NS, SP, F, R, L> { self }
}
-/// Manager which keeps track of a number of channels and sends messages to the appropriate
-/// channel, also tracking HTLC preimages and forwarding onion packets appropriately.
+/// A lightning node's channel state machine and payment management logic, which facilitates
+/// sending, forwarding, and receiving payments through lightning channels.
+///
+/// [`ChannelManager`] is parameterized by a number of components to achieve this.
+/// - [`chain::Watch`] (typically [`ChainMonitor`]) for on-chain monitoring and enforcement of each
+/// channel
+/// - [`BroadcasterInterface`] for broadcasting transactions related to opening, funding, and
+/// closing channels
+/// - [`EntropySource`] for providing random data needed for cryptographic operations
+/// - [`NodeSigner`] for cryptographic operations scoped to the node
+/// - [`SignerProvider`] for providing signers whose operations are scoped to individual channels
+/// - [`FeeEstimator`] to determine transaction fee rates needed to have a transaction mined in a
+/// timely manner
+/// - [`Router`] for finding payment paths when initiating and retrying payments
+/// - [`Logger`] for logging operational information of varying degrees
+///
+/// Additionally, it implements the following traits:
+/// - [`ChannelMessageHandler`] to handle off-chain channel activity from peers
+/// - [`MessageSendEventsProvider`] to similarly send such messages to peers
+/// - [`OffersMessageHandler`] for BOLT 12 message handling and sending
+/// - [`EventsProvider`] to generate user-actionable [`Event`]s
+/// - [`chain::Listen`] and [`chain::Confirm`] for notification of on-chain activity
+///
+/// Thus, [`ChannelManager`] is typically used to parameterize a [`MessageHandler`] and an
+/// [`OnionMessenger`]. The latter is required to support BOLT 12 functionality.
+///
+/// # `ChannelManager` vs `ChannelMonitor`
+///
+/// It's important to distinguish between the *off-chain* management and *on-chain* enforcement of
+/// lightning channels. [`ChannelManager`] exchanges messages with peers to manage the off-chain
+/// state of each channel. During this process, it generates a [`ChannelMonitor`] for each channel
+/// and a [`ChannelMonitorUpdate`] for each relevant change, notifying its parameterized
+/// [`chain::Watch`] of them.
+///
+/// An implementation of [`chain::Watch`], such as [`ChainMonitor`], is responsible for aggregating
+/// these [`ChannelMonitor`]s and applying any [`ChannelMonitorUpdate`]s to them. It then monitors
+/// for any pertinent on-chain activity, enforcing claims as needed.
+///
+/// This division of off-chain management and on-chain enforcement allows for interesting node
+/// setups. For instance, on-chain enforcement could be moved to a separate host or have added
+/// redundancy, possibly as a watchtower. See [`chain::Watch`] for the relevant interface.
+///
+/// # Initialization
+///
+/// Use [`ChannelManager::new`] with the most recent [`BlockHash`] when creating a fresh instance.
+/// Otherwise, if restarting, construct [`ChannelManagerReadArgs`] with the necessary parameters and
+/// references to any deserialized [`ChannelMonitor`]s that were previously persisted. Use this to
+/// deserialize the [`ChannelManager`] and feed it any new chain data since it was last online, as
+/// detailed in the [`ChannelManagerReadArgs`] documentation.
+///
+/// ```
+/// use bitcoin::BlockHash;
+/// use bitcoin::network::constants::Network;
+/// use lightning::chain::BestBlock;
+/// # use lightning::chain::channelmonitor::ChannelMonitor;
+/// use lightning::ln::channelmanager::{ChainParameters, ChannelManager, ChannelManagerReadArgs};
+/// # use lightning::routing::gossip::NetworkGraph;
+/// use lightning::util::config::UserConfig;
+/// use lightning::util::ser::ReadableArgs;
+///
+/// # fn read_channel_monitors() -> Vec<ChannelMonitor<lightning::sign::InMemorySigner>> { vec![] }
+/// # fn example<
+/// # 'a,
+/// # L: lightning::util::logger::Logger,
+/// # ES: lightning::sign::EntropySource,
+/// # S: for <'b> lightning::routing::scoring::LockableScore<'b, ScoreLookUp = SL>,
+/// # SL: lightning::routing::scoring::ScoreLookUp<ScoreParams = SP>,
+/// # SP: Sized,
+/// # R: lightning::io::Read,
+/// # >(
+/// # fee_estimator: &dyn lightning::chain::chaininterface::FeeEstimator,
+/// # chain_monitor: &dyn lightning::chain::Watch<lightning::sign::InMemorySigner>,
+/// # tx_broadcaster: &dyn lightning::chain::chaininterface::BroadcasterInterface,
+/// # router: &lightning::routing::router::DefaultRouter<&NetworkGraph<&'a L>, &'a L, &ES, &S, SP, SL>,
+/// # logger: &L,
+/// # entropy_source: &ES,
+/// # node_signer: &dyn lightning::sign::NodeSigner,
+/// # signer_provider: &lightning::sign::DynSignerProvider,
+/// # best_block: lightning::chain::BestBlock,
+/// # current_timestamp: u32,
+/// # mut reader: R,
+/// # ) -> Result<(), lightning::ln::msgs::DecodeError> {
+/// // Fresh start with no channels
+/// let params = ChainParameters {
+/// network: Network::Bitcoin,
+/// best_block,
+/// };
+/// let default_config = UserConfig::default();
+/// let channel_manager = ChannelManager::new(
+/// fee_estimator, chain_monitor, tx_broadcaster, router, logger, entropy_source, node_signer,
+/// signer_provider, default_config, params, current_timestamp
+/// );
+///
+/// // Restart from deserialized data
+/// let mut channel_monitors = read_channel_monitors();
+/// let args = ChannelManagerReadArgs::new(
+/// entropy_source, node_signer, signer_provider, fee_estimator, chain_monitor, tx_broadcaster,
+/// router, logger, default_config, channel_monitors.iter_mut().collect()
+/// );
+/// let (block_hash, channel_manager) =
+/// <(BlockHash, ChannelManager<_, _, _, _, _, _, _, _>)>::read(&mut reader, args)?;
+///
+/// // Update the ChannelManager and ChannelMonitors with the latest chain data
+/// // ...
+///
+/// // Move the monitors to the ChannelManager's chain::Watch parameter
+/// for monitor in channel_monitors {
+/// chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor);
+/// }
+/// # Ok(())
+/// # }
+/// ```
+///
+/// # Operation
+///
+/// The following is required for [`ChannelManager`] to function properly:
+/// - Handle messages from peers using its [`ChannelMessageHandler`] implementation (typically
+/// called by [`PeerManager::read_event`] when processing network I/O)
+/// - Send messages to peers obtained via its [`MessageSendEventsProvider`] implementation
+/// (typically initiated when [`PeerManager::process_events`] is called)
+/// - Feed on-chain activity using either its [`chain::Listen`] or [`chain::Confirm`] implementation
+/// as documented by those traits
+/// - Perform any periodic channel and payment checks by calling [`timer_tick_occurred`] roughly
+/// every minute
+/// - Persist to disk whenever [`get_and_clear_needs_persistence`] returns `true` using a
+/// [`Persister`] such as a [`KVStore`] implementation
+/// - Handle [`Event`]s obtained via its [`EventsProvider`] implementation
+///
+/// The [`Future`] returned by [`get_event_or_persistence_needed_future`] is useful in determining
+/// when the last two requirements need to be checked.
+///
+/// The [`lightning-block-sync`] and [`lightning-transaction-sync`] crates provide utilities that
+/// simplify feeding in on-chain activity using the [`chain::Listen`] and [`chain::Confirm`] traits,
+/// respectively. The remaining requirements can be met using the [`lightning-background-processor`]
+/// crate. For languages other than Rust, the availability of similar utilities may vary.
+///
+/// # Channels
+///
+/// [`ChannelManager`]'s primary function involves managing a channel state. Without channels,
+/// payments can't be sent. Use [`list_channels`] or [`list_usable_channels`] for a snapshot of the
+/// currently open channels.
+///
+/// ```
+/// # use lightning::ln::channelmanager::AChannelManager;
+/// #
+/// # fn example<T: AChannelManager>(channel_manager: T) {
+/// # let channel_manager = channel_manager.get_cm();
+/// let channels = channel_manager.list_usable_channels();
+/// for details in channels {
+/// println!("{:?}", details);
+/// }
+/// # }
+/// ```
+///
+/// Each channel is identified using a [`ChannelId`], which will change throughout the channel's
+/// life cycle. Additionally, channels are assigned a `user_channel_id`, which is given in
+/// [`Event`]s associated with the channel and serves as a fixed identifier but is otherwise unused
+/// by [`ChannelManager`].
+///
+/// ## Opening Channels
+///
+/// To open a channel with a peer, call [`create_channel`]. This will initiate the process of
+/// opening an outbound channel, which requires self-funding when handling
+/// [`Event::FundingGenerationReady`].
+///
+/// ```
+/// # use bitcoin::{ScriptBuf, Transaction};
+/// # use bitcoin::secp256k1::PublicKey;
+/// # use lightning::ln::channelmanager::AChannelManager;
+/// # use lightning::events::{Event, EventsProvider};
+/// #
+/// # trait Wallet {
+/// # fn create_funding_transaction(
+/// # &self, _amount_sats: u64, _output_script: ScriptBuf
+/// # ) -> Transaction;
+/// # }
+/// #
+/// # fn example<T: AChannelManager, W: Wallet>(channel_manager: T, wallet: W, peer_id: PublicKey) {
+/// # let channel_manager = channel_manager.get_cm();
+/// let value_sats = 1_000_000;
+/// let push_msats = 10_000_000;
+/// match channel_manager.create_channel(peer_id, value_sats, push_msats, 42, None, None) {
+/// Ok(channel_id) => println!("Opening channel {}", channel_id),
+/// Err(e) => println!("Error opening channel: {:?}", e),
+/// }
+///
+/// // On the event processing thread once the peer has responded
+/// channel_manager.process_pending_events(&|event| match event {
+/// Event::FundingGenerationReady {
+/// temporary_channel_id, counterparty_node_id, channel_value_satoshis, output_script,
+/// user_channel_id, ..
+/// } => {
+/// assert_eq!(user_channel_id, 42);
+/// let funding_transaction = wallet.create_funding_transaction(
+/// channel_value_satoshis, output_script
+/// );
+/// match channel_manager.funding_transaction_generated(
+/// &temporary_channel_id, &counterparty_node_id, funding_transaction
+/// ) {
+/// Ok(()) => println!("Funding channel {}", temporary_channel_id),
+/// Err(e) => println!("Error funding channel {}: {:?}", temporary_channel_id, e),
+/// }
+/// },
+/// Event::ChannelPending { channel_id, user_channel_id, former_temporary_channel_id, .. } => {
+/// assert_eq!(user_channel_id, 42);
+/// println!(
+/// "Channel {} now {} pending (funding transaction has been broadcasted)", channel_id,
+/// former_temporary_channel_id.unwrap()
+/// );
+/// },
+/// Event::ChannelReady { channel_id, user_channel_id, .. } => {
+/// assert_eq!(user_channel_id, 42);
+/// println!("Channel {} ready", channel_id);
+/// },
+/// // ...
+/// # _ => {},
+/// });
+/// # }
+/// ```
+///
+/// ## Accepting Channels
+///
+/// Inbound channels are initiated by peers and are automatically accepted unless [`ChannelManager`]
+/// has [`UserConfig::manually_accept_inbound_channels`] set. In that case, the channel may be
+/// either accepted or rejected when handling [`Event::OpenChannelRequest`].
+///
+/// ```
+/// # use bitcoin::secp256k1::PublicKey;
+/// # use lightning::ln::channelmanager::AChannelManager;
+/// # use lightning::events::{Event, EventsProvider};
+/// #
+/// # fn is_trusted(counterparty_node_id: PublicKey) -> bool {
+/// # // ...
+/// # unimplemented!()
+/// # }
+/// #
+/// # fn example<T: AChannelManager>(channel_manager: T) {
+/// # let channel_manager = channel_manager.get_cm();
+/// channel_manager.process_pending_events(&|event| match event {
+/// Event::OpenChannelRequest { temporary_channel_id, counterparty_node_id, .. } => {
+/// if !is_trusted(counterparty_node_id) {
+/// match channel_manager.force_close_without_broadcasting_txn(
+/// &temporary_channel_id, &counterparty_node_id
+/// ) {
+/// Ok(()) => println!("Rejecting channel {}", temporary_channel_id),
+/// Err(e) => println!("Error rejecting channel {}: {:?}", temporary_channel_id, e),
+/// }
+/// return;
+/// }
+///
+/// let user_channel_id = 43;
+/// match channel_manager.accept_inbound_channel(
+/// &temporary_channel_id, &counterparty_node_id, user_channel_id
+/// ) {
+/// Ok(()) => println!("Accepting channel {}", temporary_channel_id),
+/// Err(e) => println!("Error accepting channel {}: {:?}", temporary_channel_id, e),
+/// }
+/// },
+/// // ...
+/// # _ => {},
+/// });
+/// # }
+/// ```
+///
+/// ## Closing Channels
+///
+/// There are two ways to close a channel: either cooperatively using [`close_channel`] or
+/// unilaterally using [`force_close_broadcasting_latest_txn`]. The former is ideal as it makes for
+/// lower fees and immediate access to funds. However, the latter may be necessary if the
+/// counterparty isn't behaving properly or has gone offline. [`Event::ChannelClosed`] is generated
+/// once the channel has been closed successfully.
+///
+/// ```
+/// # use bitcoin::secp256k1::PublicKey;
+/// # use lightning::ln::ChannelId;
+/// # use lightning::ln::channelmanager::AChannelManager;
+/// # use lightning::events::{Event, EventsProvider};
+/// #
+/// # fn example<T: AChannelManager>(
+/// # channel_manager: T, channel_id: ChannelId, counterparty_node_id: PublicKey
+/// # ) {
+/// # let channel_manager = channel_manager.get_cm();
+/// match channel_manager.close_channel(&channel_id, &counterparty_node_id) {
+/// Ok(()) => println!("Closing channel {}", channel_id),
+/// Err(e) => println!("Error closing channel {}: {:?}", channel_id, e),
+/// }
+///
+/// // On the event processing thread
+/// channel_manager.process_pending_events(&|event| match event {
+/// Event::ChannelClosed { channel_id, user_channel_id, .. } => {
+/// assert_eq!(user_channel_id, 42);
+/// println!("Channel {} closed", channel_id);
+/// },
+/// // ...
+/// # _ => {},
+/// });
+/// # }
+/// ```
+///
+/// # Payments
+///
+/// [`ChannelManager`] is responsible for sending, forwarding, and receiving payments through its
+/// channels. A payment is typically initiated from a [BOLT 11] invoice or a [BOLT 12] offer, though
+/// spontaneous (i.e., keysend) payments are also possible. Incoming payments don't require
+/// maintaining any additional state as [`ChannelManager`] can reconstruct the [`PaymentPreimage`]
+/// from the [`PaymentSecret`]. Sending payments, however, requires tracking in order to retry failed
+/// HTLCs.
+///
+/// After a payment is initiated, it will appear in [`list_recent_payments`] until a short time
+/// after either an [`Event::PaymentSent`] or [`Event::PaymentFailed`] is handled. Failed HTLCs
+/// for a payment will be retried according to the payment's [`Retry`] strategy or until
+/// [`abandon_payment`] is called.
+///
+/// ## BOLT 11 Invoices
+///
+/// The [`lightning-invoice`] crate is useful for creating BOLT 11 invoices. Specifically, use the
+/// functions in its `utils` module for constructing invoices that are compatible with
+/// [`ChannelManager`]. These functions serve as a convenience for building invoices with the
+/// [`PaymentHash`] and [`PaymentSecret`] returned from [`create_inbound_payment`]. To provide your
+/// own [`PaymentHash`], use [`create_inbound_payment_for_hash`] or the corresponding functions in
+/// the [`lightning-invoice`] `utils` module.
+///
+/// [`ChannelManager`] generates an [`Event::PaymentClaimable`] once the full payment has been
+/// received. Call [`claim_funds`] to release the [`PaymentPreimage`], which in turn will result in
+/// an [`Event::PaymentClaimed`].
+///
+/// ```
+/// # use lightning::events::{Event, EventsProvider, PaymentPurpose};
+/// # use lightning::ln::channelmanager::AChannelManager;
+/// #
+/// # fn example<T: AChannelManager>(channel_manager: T) {
+/// # let channel_manager = channel_manager.get_cm();
+/// // Or use utils::create_invoice_from_channelmanager
+/// let known_payment_hash = match channel_manager.create_inbound_payment(
+/// Some(10_000_000), 3600, None
+/// ) {
+/// Ok((payment_hash, _payment_secret)) => {
+/// println!("Creating inbound payment {}", payment_hash);
+/// payment_hash
+/// },
+/// Err(()) => panic!("Error creating inbound payment"),
+/// };
+///
+/// // On the event processing thread
+/// channel_manager.process_pending_events(&|event| match event {
+/// Event::PaymentClaimable { payment_hash, purpose, .. } => match purpose {
+/// PaymentPurpose::Bolt11InvoicePayment { payment_preimage: Some(payment_preimage), .. } => {
+/// assert_eq!(payment_hash, known_payment_hash);
+/// println!("Claiming payment {}", payment_hash);
+/// channel_manager.claim_funds(payment_preimage);
+/// },
+/// PaymentPurpose::Bolt11InvoicePayment { payment_preimage: None, .. } => {
+/// println!("Unknown payment hash: {}", payment_hash);
+/// },
+/// PaymentPurpose::SpontaneousPayment(payment_preimage) => {
+/// assert_ne!(payment_hash, known_payment_hash);
+/// println!("Claiming spontaneous payment {}", payment_hash);
+/// channel_manager.claim_funds(payment_preimage);
+/// },
+/// // ...
+/// # _ => {},
+/// },
+/// Event::PaymentClaimed { payment_hash, amount_msat, .. } => {
+/// assert_eq!(payment_hash, known_payment_hash);
+/// println!("Claimed {} msats", amount_msat);
+/// },
+/// // ...
+/// # _ => {},
+/// });
+/// # }
+/// ```
+///
+/// For paying an invoice, [`lightning-invoice`] provides a `payment` module with convenience
+/// functions for use with [`send_payment`].
+///
+/// ```
+/// # use lightning::events::{Event, EventsProvider};
+/// # use lightning::ln::PaymentHash;
+/// # use lightning::ln::channelmanager::{AChannelManager, PaymentId, RecentPaymentDetails, RecipientOnionFields, Retry};
+/// # use lightning::routing::router::RouteParameters;
+/// #
+/// # fn example<T: AChannelManager>(
+/// # channel_manager: T, payment_hash: PaymentHash, recipient_onion: RecipientOnionFields,
+/// # route_params: RouteParameters, retry: Retry
+/// # ) {
+/// # let channel_manager = channel_manager.get_cm();
+/// // let (payment_hash, recipient_onion, route_params) =
+/// // payment::payment_parameters_from_invoice(&invoice);
+/// let payment_id = PaymentId([42; 32]);
+/// match channel_manager.send_payment(
+/// payment_hash, recipient_onion, payment_id, route_params, retry
+/// ) {
+/// Ok(()) => println!("Sending payment with hash {}", payment_hash),
+/// Err(e) => println!("Failed sending payment with hash {}: {:?}", payment_hash, e),
+/// }
+///
+/// let expected_payment_id = payment_id;
+/// let expected_payment_hash = payment_hash;
+/// assert!(
+/// channel_manager.list_recent_payments().iter().find(|details| matches!(
+/// details,
+/// RecentPaymentDetails::Pending {
+/// payment_id: expected_payment_id,
+/// payment_hash: expected_payment_hash,
+/// ..
+/// }
+/// )).is_some()
+/// );
+///
+/// // On the event processing thread
+/// channel_manager.process_pending_events(&|event| match event {
+/// Event::PaymentSent { payment_hash, .. } => println!("Paid {}", payment_hash),
+/// Event::PaymentFailed { payment_hash, .. } => println!("Failed paying {}", payment_hash),
+/// // ...
+/// # _ => {},
+/// });
+/// # }
+/// ```
+///
+/// ## BOLT 12 Offers
+///
+/// The [`offers`] module is useful for creating BOLT 12 offers. An [`Offer`] is a precursor to a
+/// [`Bolt12Invoice`], which must first be requested by the payer. The interchange of these messages
+/// as defined in the specification is handled by [`ChannelManager`] and its implementation of
+/// [`OffersMessageHandler`]. However, this only works with an [`Offer`] created using a builder
+/// returned by [`create_offer_builder`]. With this approach, BOLT 12 offers and invoices are
+/// stateless just as BOLT 11 invoices are.
+///
+/// ```
+/// # use lightning::events::{Event, EventsProvider, PaymentPurpose};
+/// # use lightning::ln::channelmanager::AChannelManager;
+/// # use lightning::offers::parse::Bolt12SemanticError;
+/// #
+/// # fn example<T: AChannelManager>(channel_manager: T) -> Result<(), Bolt12SemanticError> {
+/// # let channel_manager = channel_manager.get_cm();
+/// let offer = channel_manager
+/// .create_offer_builder("coffee".to_string())?
+/// # ;
+/// # // Needed for compiling for c_bindings
+/// # let builder: lightning::offers::offer::OfferBuilder<_, _> = offer.into();
+/// # let offer = builder
+/// .amount_msats(10_000_000)
+/// .build()?;
+/// let bech32_offer = offer.to_string();
+///
+/// // On the event processing thread
+/// channel_manager.process_pending_events(&|event| match event {
+/// Event::PaymentClaimable { payment_hash, purpose, .. } => match purpose {
+/// PaymentPurpose::Bolt12OfferPayment { payment_preimage: Some(payment_preimage), .. } => {
+/// println!("Claiming payment {}", payment_hash);
+/// channel_manager.claim_funds(payment_preimage);
+/// },
+/// PaymentPurpose::Bolt12OfferPayment { payment_preimage: None, .. } => {
+/// println!("Unknown payment hash: {}", payment_hash);
+/// },
+/// // ...
+/// # _ => {},
+/// },
+/// Event::PaymentClaimed { payment_hash, amount_msat, .. } => {
+/// println!("Claimed {} msats", amount_msat);
+/// },
+/// // ...
+/// # _ => {},
+/// });
+/// # Ok(())
+/// # }
+/// ```
+///
+/// Use [`pay_for_offer`] to initiate a payment, which sends an [`InvoiceRequest`] for an [`Offer`]
+/// and pays the [`Bolt12Invoice`] response. In addition to success and failure events,
+/// [`ChannelManager`] may also generate an [`Event::InvoiceRequestFailed`].
+///
+/// ```
+/// # use lightning::events::{Event, EventsProvider};
+/// # use lightning::ln::channelmanager::{AChannelManager, PaymentId, RecentPaymentDetails, Retry};
+/// # use lightning::offers::offer::Offer;
+/// #
+/// # fn example<T: AChannelManager>(
+/// # channel_manager: T, offer: &Offer, quantity: Option<u64>, amount_msats: Option<u64>,
+/// # payer_note: Option<String>, retry: Retry, max_total_routing_fee_msat: Option<u64>
+/// # ) {
+/// # let channel_manager = channel_manager.get_cm();
+/// let payment_id = PaymentId([42; 32]);
+/// match channel_manager.pay_for_offer(
+/// offer, quantity, amount_msats, payer_note, payment_id, retry, max_total_routing_fee_msat
+/// ) {
+/// Ok(()) => println!("Requesting invoice for offer"),
+/// Err(e) => println!("Unable to request invoice for offer: {:?}", e),
+/// }
+///
+/// // First the payment will be waiting on an invoice
+/// let expected_payment_id = payment_id;
+/// assert!(
+/// channel_manager.list_recent_payments().iter().find(|details| matches!(
+/// details,
+/// RecentPaymentDetails::AwaitingInvoice { payment_id: expected_payment_id }
+/// )).is_some()
+/// );
+///
+/// // Once the invoice is received, a payment will be sent
+/// assert!(
+/// channel_manager.list_recent_payments().iter().find(|details| matches!(
+/// details,
+/// RecentPaymentDetails::Pending { payment_id: expected_payment_id, .. }
+/// )).is_some()
+/// );
+///
+/// // On the event processing thread
+/// channel_manager.process_pending_events(&|event| match event {
+/// Event::PaymentSent { payment_id: Some(payment_id), .. } => println!("Paid {}", payment_id),
+/// Event::PaymentFailed { payment_id, .. } => println!("Failed paying {}", payment_id),
+/// Event::InvoiceRequestFailed { payment_id, .. } => println!("Failed paying {}", payment_id),
+/// // ...
+/// # _ => {},
+/// });
+/// # }
+/// ```
+///
+/// ## BOLT 12 Refunds
+///
+/// A [`Refund`] is a request for an invoice to be paid. Like *paying* for an [`Offer`], *creating*
+/// a [`Refund`] involves maintaining state since it represents a future outbound payment.
+/// Therefore, use [`create_refund_builder`] when creating one, otherwise [`ChannelManager`] will
+/// refuse to pay any corresponding [`Bolt12Invoice`] that it receives.
+///
+/// ```
+/// # use core::time::Duration;
+/// # use lightning::events::{Event, EventsProvider};
+/// # use lightning::ln::channelmanager::{AChannelManager, PaymentId, RecentPaymentDetails, Retry};
+/// # use lightning::offers::parse::Bolt12SemanticError;
+/// #
+/// # fn example<T: AChannelManager>(
+/// # channel_manager: T, amount_msats: u64, absolute_expiry: Duration, retry: Retry,
+/// # max_total_routing_fee_msat: Option<u64>
+/// # ) -> Result<(), Bolt12SemanticError> {
+/// # let channel_manager = channel_manager.get_cm();
+/// let payment_id = PaymentId([42; 32]);
+/// let refund = channel_manager
+/// .create_refund_builder(
+/// "coffee".to_string(), amount_msats, absolute_expiry, payment_id, retry,
+/// max_total_routing_fee_msat
+/// )?
+/// # ;
+/// # // Needed for compiling for c_bindings
+/// # let builder: lightning::offers::refund::RefundBuilder<_> = refund.into();
+/// # let refund = builder
+/// .payer_note("refund for order 1234".to_string())
+/// .build()?;
+/// let bech32_refund = refund.to_string();
+///
+/// // First the payment will be waiting on an invoice
+/// let expected_payment_id = payment_id;
+/// assert!(
+/// channel_manager.list_recent_payments().iter().find(|details| matches!(
+/// details,
+/// RecentPaymentDetails::AwaitingInvoice { payment_id: expected_payment_id }
+/// )).is_some()
+/// );
+///
+/// // Once the invoice is received, a payment will be sent
+/// assert!(
+/// channel_manager.list_recent_payments().iter().find(|details| matches!(
+/// details,
+/// RecentPaymentDetails::Pending { payment_id: expected_payment_id, .. }
+/// )).is_some()
+/// );
+///
+/// // On the event processing thread
+/// channel_manager.process_pending_events(&|event| match event {
+/// Event::PaymentSent { payment_id: Some(payment_id), .. } => println!("Paid {}", payment_id),
+/// Event::PaymentFailed { payment_id, .. } => println!("Failed paying {}", payment_id),
+/// // ...
+/// # _ => {},
+/// });
+/// # Ok(())
+/// # }
+/// ```
+///
+/// Use [`request_refund_payment`] to send a [`Bolt12Invoice`] for receiving the refund. Similar to
+/// *creating* an [`Offer`], this is stateless as it represents an inbound payment.
+///
+/// ```
+/// # use lightning::events::{Event, EventsProvider, PaymentPurpose};
+/// # use lightning::ln::channelmanager::AChannelManager;
+/// # use lightning::offers::refund::Refund;
+/// #
+/// # fn example<T: AChannelManager>(channel_manager: T, refund: &Refund) {
+/// # let channel_manager = channel_manager.get_cm();
+/// let known_payment_hash = match channel_manager.request_refund_payment(refund) {
+/// Ok(invoice) => {
+/// let payment_hash = invoice.payment_hash();
+/// println!("Requesting refund payment {}", payment_hash);
+/// payment_hash
+/// },
+/// Err(e) => panic!("Unable to request payment for refund: {:?}", e),
+/// };
///
-/// Implements [`ChannelMessageHandler`], handling the multi-channel parts and passing things through
-/// to individual Channels.
+/// // On the event processing thread
+/// channel_manager.process_pending_events(&|event| match event {
+/// Event::PaymentClaimable { payment_hash, purpose, .. } => match purpose {
+/// PaymentPurpose::Bolt12RefundPayment { payment_preimage: Some(payment_preimage), .. } => {
+/// assert_eq!(payment_hash, known_payment_hash);
+/// println!("Claiming payment {}", payment_hash);
+/// channel_manager.claim_funds(payment_preimage);
+/// },
+/// PaymentPurpose::Bolt12RefundPayment { payment_preimage: None, .. } => {
+/// println!("Unknown payment hash: {}", payment_hash);
+/// },
+/// // ...
+/// # _ => {},
+/// },
+/// Event::PaymentClaimed { payment_hash, amount_msat, .. } => {
+/// assert_eq!(payment_hash, known_payment_hash);
+/// println!("Claimed {} msats", amount_msat);
+/// },
+/// // ...
+/// # _ => {},
+/// });
+/// # }
+/// ```
+///
+/// # Persistence
///
/// Implements [`Writeable`] to write out all channel state to disk. Implies [`peer_disconnected`] for
/// all peers during write/read (though does not modify this instance, only the instance being
/// tells you the last block hash which was connected. You should get the best block tip before using the manager.
/// See [`chain::Listen`] and [`chain::Confirm`] for more details.
///
+/// # `ChannelUpdate` Messages
+///
/// Note that `ChannelManager` is responsible for tracking liveness of its channels and generating
/// [`ChannelUpdate`] messages informing peers that the channel is temporarily disabled. To avoid
/// spam due to quick disconnection/reconnection, updates are not sent until the channel has been
/// offline for a full minute. In order to track this, you must call
/// [`timer_tick_occurred`] roughly once per minute, though it doesn't have to be perfect.
///
+/// # DoS Mitigation
+///
/// To avoid trivial DoS issues, `ChannelManager` limits the number of inbound connections and
/// inbound channels without confirmed funding transactions. This may result in nodes which we do
/// not have a channel with being unable to connect to us or open new channels with us if we have
/// exempted from the count of unfunded channels. Similarly, outbound channels and connections are
/// never limited. Please ensure you limit the count of such channels yourself.
///
+/// # Type Aliases
+///
/// Rather than using a plain `ChannelManager`, it is preferable to use either a [`SimpleArcChannelManager`]
/// a [`SimpleRefChannelManager`], for conciseness. See their documentation for more details, but
/// essentially you should default to using a [`SimpleRefChannelManager`], and use a
/// [`SimpleArcChannelManager`] when you require a `ChannelManager` with a static lifetime, such as when
/// you're using lightning-net-tokio.
///
+/// [`ChainMonitor`]: crate::chain::chainmonitor::ChainMonitor
+/// [`MessageHandler`]: crate::ln::peer_handler::MessageHandler
+/// [`OnionMessenger`]: crate::onion_message::messenger::OnionMessenger
+/// [`PeerManager::read_event`]: crate::ln::peer_handler::PeerManager::read_event
+/// [`PeerManager::process_events`]: crate::ln::peer_handler::PeerManager::process_events
+/// [`timer_tick_occurred`]: Self::timer_tick_occurred
+/// [`get_and_clear_needs_persistence`]: Self::get_and_clear_needs_persistence
+/// [`Persister`]: crate::util::persist::Persister
+/// [`KVStore`]: crate::util::persist::KVStore
+/// [`get_event_or_persistence_needed_future`]: Self::get_event_or_persistence_needed_future
+/// [`lightning-block-sync`]: https://docs.rs/lightning_block_sync/latest/lightning_block_sync
+/// [`lightning-transaction-sync`]: https://docs.rs/lightning_transaction_sync/latest/lightning_transaction_sync
+/// [`lightning-background-processor`]: https://docs.rs/lightning_background_processor/latest/lightning_background_processor
+/// [`list_channels`]: Self::list_channels
+/// [`list_usable_channels`]: Self::list_usable_channels
+/// [`create_channel`]: Self::create_channel
+/// [`close_channel`]: Self::close_channel
+/// [`force_close_broadcasting_latest_txn`]: Self::force_close_broadcasting_latest_txn
+/// [BOLT 11]: https://github.com/lightning/bolts/blob/master/11-payment-encoding.md
+/// [BOLT 12]: https://github.com/rustyrussell/lightning-rfc/blob/guilt/offers/12-offer-encoding.md
+/// [`list_recent_payments`]: Self::list_recent_payments
+/// [`abandon_payment`]: Self::abandon_payment
+/// [`lightning-invoice`]: https://docs.rs/lightning_invoice/latest/lightning_invoice
+/// [`create_inbound_payment`]: Self::create_inbound_payment
+/// [`create_inbound_payment_for_hash`]: Self::create_inbound_payment_for_hash
+/// [`claim_funds`]: Self::claim_funds
+/// [`send_payment`]: Self::send_payment
+/// [`offers`]: crate::offers
+/// [`create_offer_builder`]: Self::create_offer_builder
+/// [`pay_for_offer`]: Self::pay_for_offer
+/// [`InvoiceRequest`]: crate::offers::invoice_request::InvoiceRequest
+/// [`create_refund_builder`]: Self::create_refund_builder
+/// [`request_refund_payment`]: Self::request_refund_payment
/// [`peer_disconnected`]: msgs::ChannelMessageHandler::peer_disconnected
/// [`funding_created`]: msgs::FundingCreated
/// [`funding_transaction_generated`]: Self::funding_transaction_generated
/// [`BlockHash`]: bitcoin::hash_types::BlockHash
/// [`update_channel`]: chain::Watch::update_channel
/// [`ChannelUpdate`]: msgs::ChannelUpdate
-/// [`timer_tick_occurred`]: Self::timer_tick_occurred
/// [`read`]: ReadableArgs::read
//
// Lock order:
// | |
// | |__`pending_intercepted_htlcs`
// |
+// |__`decode_update_add_htlcs`
+// |
// |__`per_peer_state`
// |
// |__`pending_inbound_payments`
/// See `ChannelManager` struct-level documentation for lock order requirements.
pending_intercepted_htlcs: Mutex<HashMap<InterceptId, PendingAddHTLCInfo>>,
+ /// SCID/SCID Alias -> pending `update_add_htlc`s to decode.
+ ///
+ /// Note that because we may have an SCID Alias as the key we can have two entries per channel,
+ /// though in practice we probably won't be receiving HTLCs for a channel both via the alias
+ /// and via the classic SCID.
+ ///
+ /// Note that no consistency guarantees are made about the existence of a channel with the
+ /// `short_channel_id` here, nor the `channel_id` in `UpdateAddHTLC`!
+ ///
+ /// See `ChannelManager` struct-level documentation for lock order requirements.
+ decode_update_add_htlcs: Mutex<HashMap<u64, Vec<msgs::UpdateAddHTLC>>>,
+
/// The sets of payments which are claimable or currently being claimed. See
/// [`ClaimablePayments`]' individual field docs for more info.
///
pending_offers_messages: Mutex<Vec<PendingOnionMessage<OffersMessage>>>,
+ /// Tracks the message events that are to be broadcasted when we are connected to some peer.
+ pending_broadcast_messages: Mutex<Vec<MessageSendEvent>>,
+
entropy_source: ES,
node_signer: NS,
signer_provider: SP,
pub counterparty: ChannelCounterparty,
/// The Channel's funding transaction output, if we've negotiated the funding transaction with
/// our counterparty already.
- ///
- /// Note that, if this has been set, `channel_id` will be equivalent to
- /// `funding_txo.unwrap().to_channel_id()`.
pub funding_txo: Option<OutPoint>,
/// The features which this channel operates with. See individual features for more info.
///
///
/// This field is only `None` for `ChannelDetails` objects serialized prior to LDK 0.0.109.
pub config: Option<ChannelConfig>,
+ /// Pending inbound HTLCs.
+ ///
+ /// This field is empty for objects serialized with LDK versions prior to 0.0.122.
+ pub pending_inbound_htlcs: Vec<InboundHTLCDetails>,
+ /// Pending outbound HTLCs.
+ ///
+ /// This field is empty for objects serialized with LDK versions prior to 0.0.122.
+ pub pending_outbound_htlcs: Vec<OutboundHTLCDetails>,
}
impl ChannelDetails {
inbound_htlc_maximum_msat: context.get_holder_htlc_maximum_msat(),
config: Some(context.config()),
channel_shutdown_state: Some(context.shutdown_state()),
+ pending_inbound_htlcs: context.get_pending_inbound_htlc_details(),
+ pending_outbound_htlcs: context.get_pending_outbound_htlc_details(),
}
}
}
match $internal {
Ok(msg) => Ok(msg),
Err(MsgHandleErrInternal { err, shutdown_finish, .. }) => {
- let mut msg_events = Vec::with_capacity(2);
+ let mut msg_event = None;
if let Some((shutdown_res, update_option)) = shutdown_finish {
let counterparty_node_id = shutdown_res.counterparty_node_id;
$self.finish_close_channel(shutdown_res);
if let Some(update) = update_option {
- msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+ let mut pending_broadcast_messages = $self.pending_broadcast_messages.lock().unwrap();
+ pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate {
msg: update
});
}
if let msgs::ErrorAction::IgnoreError = err.action {
} else {
- msg_events.push(events::MessageSendEvent::HandleError {
+ msg_event = Some(events::MessageSendEvent::HandleError {
node_id: $counterparty_node_id,
action: err.action.clone()
});
}
- if !msg_events.is_empty() {
+ if let Some(msg_event) = msg_event {
let per_peer_state = $self.per_peer_state.read().unwrap();
if let Some(peer_state_mutex) = per_peer_state.get(&$counterparty_node_id) {
let mut peer_state = peer_state_mutex.lock().unwrap();
- peer_state.pending_msg_events.append(&mut msg_events);
+ peer_state.pending_msg_events.push(msg_event);
}
}
ChannelPhase::UnfundedInboundV1(channel) => {
convert_chan_phase_err!($self, $err, channel, $channel_id, UNFUNDED_CHANNEL)
},
+ #[cfg(any(dual_funding, splicing))]
+ ChannelPhase::UnfundedOutboundV2(channel) => {
+ convert_chan_phase_err!($self, $err, channel, $channel_id, UNFUNDED_CHANNEL)
+ },
+ #[cfg(any(dual_funding, splicing))]
+ ChannelPhase::UnfundedInboundV2(channel) => {
+ convert_chan_phase_err!($self, $err, channel, $channel_id, UNFUNDED_CHANNEL)
+ },
}
};
}
counterparty_node_id: $channel.context.get_counterparty_node_id(),
user_channel_id: $channel.context.get_user_id(),
funding_txo: $channel.context.get_funding_txo().unwrap().into_bitcoin_outpoint(),
+ channel_type: Some($channel.context.get_channel_type().clone()),
}, None));
$channel.context.set_channel_pending_event_emitted();
}
let logger = WithChannelContext::from(&$self.logger, &$chan.context);
let mut updates = $chan.monitor_updating_restored(&&logger,
&$self.node_signer, $self.chain_hash, &$self.default_configuration,
- $self.best_block.read().unwrap().height());
+ $self.best_block.read().unwrap().height);
let counterparty_node_id = $chan.context.get_counterparty_node_id();
let channel_update = if updates.channel_ready.is_some() && $chan.context.is_usable() {
// We only send a channel_update in the case where we are just now sending a
let update_actions = $peer_state.monitor_update_blocked_actions
.remove(&$chan.context.channel_id()).unwrap_or(Vec::new());
- let htlc_forwards = $self.handle_channel_resumption(
+ let (htlc_forwards, decode_update_add_htlcs) = $self.handle_channel_resumption(
&mut $peer_state.pending_msg_events, $chan, updates.raa,
- updates.commitment_update, updates.order, updates.accepted_htlcs,
+ updates.commitment_update, updates.order, updates.accepted_htlcs, updates.pending_update_adds,
updates.funding_broadcastable, updates.channel_ready,
updates.announcement_sigs);
if let Some(upd) = channel_update {
if let Some(forwards) = htlc_forwards {
$self.forward_htlcs(&mut [forwards][..]);
}
+ if let Some(decode) = decode_update_add_htlcs {
+ $self.push_decode_update_add_htlcs(decode);
+ }
$self.finalize_claims(updates.finalized_claimed_htlcs);
for failure in updates.failed_htlcs.drain(..) {
let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id };
best_block: RwLock::new(params.best_block),
- outbound_scid_aliases: Mutex::new(HashSet::new()),
- pending_inbound_payments: Mutex::new(HashMap::new()),
+ outbound_scid_aliases: Mutex::new(new_hash_set()),
+ pending_inbound_payments: Mutex::new(new_hash_map()),
pending_outbound_payments: OutboundPayments::new(),
- forward_htlcs: Mutex::new(HashMap::new()),
- claimable_payments: Mutex::new(ClaimablePayments { claimable_payments: HashMap::new(), pending_claiming_payments: HashMap::new() }),
- pending_intercepted_htlcs: Mutex::new(HashMap::new()),
- outpoint_to_peer: Mutex::new(HashMap::new()),
- short_to_chan_info: FairRwLock::new(HashMap::new()),
+ forward_htlcs: Mutex::new(new_hash_map()),
+ decode_update_add_htlcs: Mutex::new(new_hash_map()),
+ claimable_payments: Mutex::new(ClaimablePayments { claimable_payments: new_hash_map(), pending_claiming_payments: new_hash_map() }),
+ pending_intercepted_htlcs: Mutex::new(new_hash_map()),
+ outpoint_to_peer: Mutex::new(new_hash_map()),
+ short_to_chan_info: FairRwLock::new(new_hash_map()),
our_network_pubkey: node_signer.get_node_id(Recipient::Node).unwrap(),
secp_ctx,
highest_seen_timestamp: AtomicUsize::new(current_timestamp as usize),
- per_peer_state: FairRwLock::new(HashMap::new()),
+ per_peer_state: FairRwLock::new(new_hash_map()),
pending_events: Mutex::new(VecDeque::new()),
pending_events_processor: AtomicBool::new(false),
funding_batch_states: Mutex::new(BTreeMap::new()),
pending_offers_messages: Mutex::new(Vec::new()),
+ pending_broadcast_messages: Mutex::new(Vec::new()),
entropy_source,
node_signer,
}
fn create_and_insert_outbound_scid_alias(&self) -> u64 {
- let height = self.best_block.read().unwrap().height();
+ let height = self.best_block.read().unwrap().height;
let mut outbound_scid_alias = 0;
let mut i = 0;
loop {
let config = if override_config.is_some() { override_config.as_ref().unwrap() } else { &self.default_configuration };
match OutboundV1Channel::new(&self.fee_estimator, &self.entropy_source, &self.signer_provider, their_network_key,
their_features, channel_value_satoshis, push_msat, user_channel_id, config,
- self.best_block.read().unwrap().height(), outbound_scid_alias, temporary_channel_id)
+ self.best_block.read().unwrap().height, outbound_scid_alias, temporary_channel_id)
{
Ok(res) => res,
Err(e) => {
// the same channel.
let mut res = Vec::with_capacity(self.short_to_chan_info.read().unwrap().len());
{
- let best_block_height = self.best_block.read().unwrap().height();
+ let best_block_height = self.best_block.read().unwrap().height;
let per_peer_state = self.per_peer_state.read().unwrap();
for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
// the same channel.
let mut res = Vec::with_capacity(self.short_to_chan_info.read().unwrap().len());
{
- let best_block_height = self.best_block.read().unwrap().height();
+ let best_block_height = self.best_block.read().unwrap().height;
let per_peer_state = self.per_peer_state.read().unwrap();
for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
/// Gets the list of channels we have with a given counterparty, in random order.
pub fn list_channels_with_counterparty(&self, counterparty_node_id: &PublicKey) -> Vec<ChannelDetails> {
- let best_block_height = self.best_block.read().unwrap().height();
+ let best_block_height = self.best_block.read().unwrap().height;
let per_peer_state = self.per_peer_state.read().unwrap();
if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id };
self.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver);
}
- if let Some((_, funding_txo, monitor_update)) = shutdown_res.monitor_update {
+ if let Some((_, funding_txo, _channel_id, monitor_update)) = shutdown_res.monitor_update {
// There isn't anything we can do if we get an update failure - we're already
// force-closing. The monitor update on the required in-memory copy should broadcast
// the latest local state, which is the best we can do anyway. Thus, it is safe to
// Unfunded channel has no update
(None, chan_phase.context().get_counterparty_node_id())
},
+ // TODO(dual_funding): Combine this match arm with above once #[cfg(any(dual_funding, splicing))] is removed.
+ #[cfg(any(dual_funding, splicing))]
+ ChannelPhase::UnfundedOutboundV2(_) | ChannelPhase::UnfundedInboundV2(_) => {
+ self.finish_close_channel(chan_phase.context_mut().force_shutdown(false, closure_reason));
+ // Unfunded channel has no update
+ (None, chan_phase.context().get_counterparty_node_id())
+ },
}
} else if peer_state.inbound_channel_request_by_id.remove(channel_id).is_some() {
log_error!(logger, "Force-closing channel {}", &channel_id);
}
};
if let Some(update) = update_opt {
- // Try to send the `BroadcastChannelUpdate` to the peer we just force-closed on, but if
- // not try to broadcast it via whatever peer we have.
- let per_peer_state = self.per_peer_state.read().unwrap();
- let a_peer_state_opt = per_peer_state.get(peer_node_id)
- .ok_or(per_peer_state.values().next());
- if let Ok(a_peer_state_mutex) = a_peer_state_opt {
- let mut a_peer_state = a_peer_state_mutex.lock().unwrap();
- a_peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
- msg: update
- });
- }
+ // If we have some Channel Update to broadcast, we cache it and broadcast it later.
+ let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap();
+ pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate {
+ msg: update
+ });
}
Ok(counterparty_node_id)
/// the latest local transaction(s). Fails if `channel_id` is unknown to the manager, or if the
/// `counterparty_node_id` isn't the counterparty of the corresponding channel.
///
- /// You can always get the latest local transaction(s) to broadcast from
- /// [`ChannelMonitor::get_latest_holder_commitment_txn`].
+ /// You can always broadcast the latest local transaction(s) via
+ /// [`ChannelMonitor::broadcast_latest_holder_commitment_txn`].
pub fn force_close_without_broadcasting_txn(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey)
-> Result<(), APIError> {
self.force_close_sending_error(channel_id, counterparty_node_id, false)
}
}
+	/// Checks whether an HTLC can be forwarded over `chan` (the outgoing channel) as
+	/// described by `next_packet`, returning the failure to report back if it cannot.
+	///
+	/// On failure, returns the BOLT 4 error message, error code and, for `0x1000`-class
+	/// (`UPDATE`) errors, the `channel_update` to embed in the failure data. Forwards are
+	/// refused when: the channel is unannounced and our config disallows forwards to
+	/// private channels; the counterparty requires SCID-alias privacy but the real SCID
+	/// was used; the channel is not live; the amount is below the counterparty's
+	/// `htlc_minimum_msat`; or the HTLC violates the channel's forwarding config
+	/// (per `htlc_satisfies_config`).
+	fn can_forward_htlc_to_outgoing_channel(
+		&self, chan: &mut Channel<SP>, msg: &msgs::UpdateAddHTLC, next_packet: &NextPacketDetails
+	) -> Result<(), (&'static str, u16, Option<msgs::ChannelUpdate>)> {
+		if !chan.context.should_announce() && !self.default_configuration.accept_forwards_to_priv_channels {
+			// Note that the behavior here should be identical to the above block - we
+			// should NOT reveal the existence or non-existence of a private channel if
+			// we don't allow forwards outbound over them.
+			return Err(("Refusing to forward to a private channel based on our config.", 0x4000 | 10, None));
+		}
+		if chan.context.get_channel_type().supports_scid_privacy() && next_packet.outgoing_scid != chan.context.outbound_scid_alias() {
+			// `option_scid_alias` (referred to in LDK as `scid_privacy`) means
+			// "refuse to forward unless the SCID alias was used", so we pretend
+			// we don't have the channel here.
+			return Err(("Refusing to forward over real channel SCID as our counterparty requested.", 0x4000 | 10, None));
+		}
+
+		// Note that we could technically not return an error yet here and just hope
+		// that the connection is reestablished or monitor updated by the time we get
+		// around to doing the actual forward, but better to fail early if we can and
+		// hopefully an attacker trying to path-trace payments cannot make this occur
+		// on a small/per-node/per-channel scale.
+		if !chan.context.is_live() { // channel_disabled
+			// If the channel_update we're going to return is disabled (i.e. the
+			// peer has been disabled for some time), return `channel_disabled`,
+			// otherwise return `temporary_channel_failure`.
+			let chan_update_opt = self.get_channel_update_for_onion(next_packet.outgoing_scid, chan).ok();
+			if chan_update_opt.as_ref().map(|u| u.contents.flags & 2 == 2).unwrap_or(false) {
+				return Err(("Forwarding channel has been disconnected for some time.", 0x1000 | 20, chan_update_opt));
+			} else {
+				return Err(("Forwarding channel is not in a ready state.", 0x1000 | 7, chan_update_opt));
+			}
+		}
+		if next_packet.outgoing_amt_msat < chan.context.get_counterparty_htlc_minimum_msat() { // amount_below_minimum
+			let chan_update_opt = self.get_channel_update_for_onion(next_packet.outgoing_scid, chan).ok();
+			return Err(("HTLC amount was below the htlc_minimum_msat", 0x1000 | 11, chan_update_opt));
+		}
+		if let Err((err, code)) = chan.htlc_satisfies_config(msg, next_packet.outgoing_amt_msat, next_packet.outgoing_cltv_value) {
+			let chan_update_opt = self.get_channel_update_for_onion(next_packet.outgoing_scid, chan).ok();
+			return Err((err, code, chan_update_opt));
+		}
+
+		Ok(())
+	}
+
+	/// Executes a callback `C` that returns some value `X` on the channel found with the given
+	/// `scid`. `None` is returned when the channel is not found.
+	///
+	/// `None` is also returned when the SCID resolves to a channel that is not in the
+	/// `ChannelPhase::Funded` phase, or whose peer's state has been removed — the callback
+	/// is only ever invoked on a funded channel.
+	fn do_funded_channel_callback<X, C: Fn(&mut Channel<SP>) -> X>(
+		&self, scid: u64, callback: C,
+	) -> Option<X> {
+		// Resolve the SCID to a (counterparty, channel_id) pair first so we don't hold
+		// the short_to_chan_info read lock while taking the per-peer locks below.
+		let (counterparty_node_id, channel_id) = match self.short_to_chan_info.read().unwrap().get(&scid).cloned() {
+			None => return None,
+			Some((cp_id, id)) => (cp_id, id),
+		};
+		let per_peer_state = self.per_peer_state.read().unwrap();
+		let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id);
+		if peer_state_mutex_opt.is_none() {
+			return None;
+		}
+		let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
+		let peer_state = &mut *peer_state_lock;
+		match peer_state.channel_by_id.get_mut(&channel_id).and_then(
+			|chan_phase| if let ChannelPhase::Funded(chan) = chan_phase { Some(chan) } else { None }
+		) {
+			None => None,
+			Some(chan) => Some(callback(chan)),
+		}
+	}
+
+	/// Checks whether `msg` can be forwarded towards `next_packet_details.outgoing_scid`,
+	/// returning the BOLT 4 failure (message, error code, optional `channel_update`) if not.
+	///
+	/// If the SCID resolves to a funded channel, the per-channel checks in
+	/// `can_forward_htlc_to_outgoing_channel` are applied. An unknown SCID is still
+	/// accepted when it is a valid phantom SCID or (if `accept_intercept_htlcs` is set)
+	/// a valid intercept SCID. Finally, the HTLC's `cltv_expiry` is checked against the
+	/// outgoing CLTV and the current best-block height via `check_incoming_htlc_cltv`.
+	fn can_forward_htlc(
+		&self, msg: &msgs::UpdateAddHTLC, next_packet_details: &NextPacketDetails
+	) -> Result<(), (&'static str, u16, Option<msgs::ChannelUpdate>)> {
+		match self.do_funded_channel_callback(next_packet_details.outgoing_scid, |chan: &mut Channel<SP>| {
+			self.can_forward_htlc_to_outgoing_channel(chan, msg, next_packet_details)
+		}) {
+			Some(Ok(())) => {},
+			Some(Err(e)) => return Err(e),
+			None => {
+				// If we couldn't find the channel info for the scid, it may be a phantom or
+				// intercept forward.
+				if (self.default_configuration.accept_intercept_htlcs &&
+					fake_scid::is_valid_intercept(&self.fake_scid_rand_bytes, next_packet_details.outgoing_scid, &self.chain_hash)) ||
+					fake_scid::is_valid_phantom(&self.fake_scid_rand_bytes, next_packet_details.outgoing_scid, &self.chain_hash)
+				{} else {
+					return Err(("Don't have available channel for forwarding as requested.", 0x4000 | 10, None));
+				}
+			}
+		}
+
+		let cur_height = self.best_block.read().unwrap().height + 1;
+		if let Err((err_msg, err_code)) = check_incoming_htlc_cltv(
+			cur_height, next_packet_details.outgoing_cltv_value, msg.cltv_expiry
+		) {
+			// Fetch a channel_update for the outgoing channel (if any) to embed in the
+			// failure data, as UPDATE-class errors require one.
+			let chan_update_opt = self.do_funded_channel_callback(next_packet_details.outgoing_scid, |chan: &mut Channel<SP>| {
+				self.get_channel_update_for_onion(next_packet_details.outgoing_scid, chan).ok()
+			}).flatten();
+			return Err((err_msg, err_code, chan_update_opt));
+		}
+
+		Ok(())
+	}
+
+	/// Builds the [`HTLCFailureMsg`] to send back for an incoming `update_add_htlc` which
+	/// failed one of our acceptance/forwarding checks.
+	///
+	/// For `0x1000`-class (`UPDATE`) error codes the provided `chan_update` is serialized
+	/// into the failure data together with the amount/CLTV/flags field the specific code
+	/// requires; if no update is available the code is downgraded to
+	/// `temporary_node_failure` (`0x2000 | 2`). If `msg.blinding_point` is set we always
+	/// fail with `update_fail_malformed_htlc` carrying `INVALID_ONION_BLINDING`, and if
+	/// we are the intro node of a blinded forward the relayed error code and data are
+	/// likewise replaced with `INVALID_ONION_BLINDING` and zeroes.
+	fn htlc_failure_from_update_add_err(
+		&self, msg: &msgs::UpdateAddHTLC, counterparty_node_id: &PublicKey, err_msg: &'static str,
+		mut err_code: u16, chan_update: Option<msgs::ChannelUpdate>, is_intro_node_blinded_forward: bool,
+		shared_secret: &[u8; 32]
+	) -> HTLCFailureMsg {
+		let mut res = VecWriter(Vec::with_capacity(chan_update.serialized_length() + 2 + 8 + 2));
+		if chan_update.is_some() && err_code & 0x1000 == 0x1000 {
+			let chan_update = chan_update.unwrap();
+			if err_code == 0x1000 | 11 || err_code == 0x1000 | 12 {
+				msg.amount_msat.write(&mut res).expect("Writes cannot fail");
+			}
+			else if err_code == 0x1000 | 13 {
+				msg.cltv_expiry.write(&mut res).expect("Writes cannot fail");
+			}
+			else if err_code == 0x1000 | 20 {
+				// TODO: underspecified, follow https://github.com/lightning/bolts/issues/791
+				0u16.write(&mut res).expect("Writes cannot fail");
+			}
+			(chan_update.serialized_length() as u16 + 2).write(&mut res).expect("Writes cannot fail");
+			msgs::ChannelUpdate::TYPE.write(&mut res).expect("Writes cannot fail");
+			chan_update.write(&mut res).expect("Writes cannot fail");
+		} else if err_code & 0x1000 == 0x1000 {
+			// If we're trying to return an error that requires a `channel_update` but
+			// we're forwarding to a phantom or intercept "channel" (i.e. cannot
+			// generate an update), just use the generic "temporary_node_failure"
+			// instead.
+			err_code = 0x2000 | 2;
+		}
+
+		log_info!(
+			WithContext::from(&self.logger, Some(*counterparty_node_id), Some(msg.channel_id)),
+			"Failed to accept/forward incoming HTLC: {}", err_msg
+		);
+		// If `msg.blinding_point` is set, we must always fail with malformed.
+		if msg.blinding_point.is_some() {
+			return HTLCFailureMsg::Malformed(msgs::UpdateFailMalformedHTLC {
+				channel_id: msg.channel_id,
+				htlc_id: msg.htlc_id,
+				sha256_of_onion: [0; 32],
+				failure_code: INVALID_ONION_BLINDING,
+			});
+		}
+
+		let (err_code, err_data) = if is_intro_node_blinded_forward {
+			(INVALID_ONION_BLINDING, &[0; 32][..])
+		} else {
+			(err_code, &res.0[..])
+		};
+		HTLCFailureMsg::Relay(msgs::UpdateFailHTLC {
+			channel_id: msg.channel_id,
+			htlc_id: msg.htlc_id,
+			reason: HTLCFailReason::reason(err_code, err_data.to_vec())
+				.get_encrypted_failure_packet(shared_secret, &None),
+		})
+	}
+
fn decode_update_add_htlc_onion(
&self, msg: &msgs::UpdateAddHTLC, counterparty_node_id: &PublicKey,
) -> Result<
msg, &self.node_signer, &self.logger, &self.secp_ctx
)?;
- let is_intro_node_forward = match next_hop {
- onion_utils::Hop::Forward {
- next_hop_data: msgs::InboundOnionPayload::BlindedForward {
- intro_node_blinding_point: Some(_), ..
- }, ..
- } => true,
- _ => false,
- };
-
- macro_rules! return_err {
- ($msg: expr, $err_code: expr, $data: expr) => {
- {
- log_info!(
- WithContext::from(&self.logger, Some(*counterparty_node_id), Some(msg.channel_id)),
- "Failed to accept/forward incoming HTLC: {}", $msg
- );
- // If `msg.blinding_point` is set, we must always fail with malformed.
- if msg.blinding_point.is_some() {
- return Err(HTLCFailureMsg::Malformed(msgs::UpdateFailMalformedHTLC {
- channel_id: msg.channel_id,
- htlc_id: msg.htlc_id,
- sha256_of_onion: [0; 32],
- failure_code: INVALID_ONION_BLINDING,
- }));
- }
-
- let (err_code, err_data) = if is_intro_node_forward {
- (INVALID_ONION_BLINDING, &[0; 32][..])
- } else { ($err_code, $data) };
- return Err(HTLCFailureMsg::Relay(msgs::UpdateFailHTLC {
- channel_id: msg.channel_id,
- htlc_id: msg.htlc_id,
- reason: HTLCFailReason::reason(err_code, err_data.to_vec())
- .get_encrypted_failure_packet(&shared_secret, &None),
- }));
- }
- }
- }
-
- let NextPacketDetails {
- next_packet_pubkey, outgoing_amt_msat, outgoing_scid, outgoing_cltv_value
- } = match next_packet_details_opt {
+ let next_packet_details = match next_packet_details_opt {
Some(next_packet_details) => next_packet_details,
// it is a receive, so no need for outbound checks
None => return Ok((next_hop, shared_secret, None)),
// Perform outbound checks here instead of in [`Self::construct_pending_htlc_info`] because we
// can't hold the outbound peer state lock at the same time as the inbound peer state lock.
- if let Some((err, mut code, chan_update)) = loop {
- let id_option = self.short_to_chan_info.read().unwrap().get(&outgoing_scid).cloned();
- let forwarding_chan_info_opt = match id_option {
- None => { // unknown_next_peer
- // Note that this is likely a timing oracle for detecting whether an scid is a
- // phantom or an intercept.
- if (self.default_configuration.accept_intercept_htlcs &&
- fake_scid::is_valid_intercept(&self.fake_scid_rand_bytes, outgoing_scid, &self.chain_hash)) ||
- fake_scid::is_valid_phantom(&self.fake_scid_rand_bytes, outgoing_scid, &self.chain_hash)
- {
- None
- } else {
- break Some(("Don't have available channel for forwarding as requested.", 0x4000 | 10, None));
- }
- },
- Some((cp_id, id)) => Some((cp_id.clone(), id.clone())),
- };
- let chan_update_opt = if let Some((counterparty_node_id, forwarding_id)) = forwarding_chan_info_opt {
- let per_peer_state = self.per_peer_state.read().unwrap();
- let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id);
- if peer_state_mutex_opt.is_none() {
- break Some(("Don't have available channel for forwarding as requested.", 0x4000 | 10, None));
- }
- let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
- let peer_state = &mut *peer_state_lock;
- let chan = match peer_state.channel_by_id.get_mut(&forwarding_id).map(
- |chan_phase| if let ChannelPhase::Funded(chan) = chan_phase { Some(chan) } else { None }
- ).flatten() {
- None => {
- // Channel was removed. The short_to_chan_info and channel_by_id maps
- // have no consistency guarantees.
- break Some(("Don't have available channel for forwarding as requested.", 0x4000 | 10, None));
- },
- Some(chan) => chan
- };
- if !chan.context.should_announce() && !self.default_configuration.accept_forwards_to_priv_channels {
- // Note that the behavior here should be identical to the above block - we
- // should NOT reveal the existence or non-existence of a private channel if
- // we don't allow forwards outbound over them.
- break Some(("Refusing to forward to a private channel based on our config.", 0x4000 | 10, None));
- }
- if chan.context.get_channel_type().supports_scid_privacy() && outgoing_scid != chan.context.outbound_scid_alias() {
- // `option_scid_alias` (referred to in LDK as `scid_privacy`) means
- // "refuse to forward unless the SCID alias was used", so we pretend
- // we don't have the channel here.
- break Some(("Refusing to forward over real channel SCID as our counterparty requested.", 0x4000 | 10, None));
- }
- let chan_update_opt = self.get_channel_update_for_onion(outgoing_scid, chan).ok();
-
- // Note that we could technically not return an error yet here and just hope
- // that the connection is reestablished or monitor updated by the time we get
- // around to doing the actual forward, but better to fail early if we can and
- // hopefully an attacker trying to path-trace payments cannot make this occur
- // on a small/per-node/per-channel scale.
- if !chan.context.is_live() { // channel_disabled
- // If the channel_update we're going to return is disabled (i.e. the
- // peer has been disabled for some time), return `channel_disabled`,
- // otherwise return `temporary_channel_failure`.
- if chan_update_opt.as_ref().map(|u| u.contents.flags & 2 == 2).unwrap_or(false) {
- break Some(("Forwarding channel has been disconnected for some time.", 0x1000 | 20, chan_update_opt));
- } else {
- break Some(("Forwarding channel is not in a ready state.", 0x1000 | 7, chan_update_opt));
- }
- }
- if outgoing_amt_msat < chan.context.get_counterparty_htlc_minimum_msat() { // amount_below_minimum
- break Some(("HTLC amount was below the htlc_minimum_msat", 0x1000 | 11, chan_update_opt));
- }
- if let Err((err, code)) = chan.htlc_satisfies_config(&msg, outgoing_amt_msat, outgoing_cltv_value) {
- break Some((err, code, chan_update_opt));
- }
- chan_update_opt
- } else {
- None
- };
-
- let cur_height = self.best_block.read().unwrap().height() + 1;
-
- if let Err((err_msg, code)) = check_incoming_htlc_cltv(
- cur_height, outgoing_cltv_value, msg.cltv_expiry
- ) {
- if code & 0x1000 != 0 && chan_update_opt.is_none() {
- // We really should set `incorrect_cltv_expiry` here but as we're not
- // forwarding over a real channel we can't generate a channel_update
- // for it. Instead we just return a generic temporary_node_failure.
- break Some((err_msg, 0x2000 | 2, None))
- }
- let chan_update_opt = if code & 0x1000 != 0 { chan_update_opt } else { None };
- break Some((err_msg, code, chan_update_opt));
- }
+ self.can_forward_htlc(&msg, &next_packet_details).map_err(|e| {
+ let (err_msg, err_code, chan_update_opt) = e;
+ self.htlc_failure_from_update_add_err(
+ msg, counterparty_node_id, err_msg, err_code, chan_update_opt,
+ next_hop.is_intro_node_blinded_forward(), &shared_secret
+ )
+ })?;
- break None;
- }
- {
- let mut res = VecWriter(Vec::with_capacity(chan_update.serialized_length() + 2 + 8 + 2));
- if let Some(chan_update) = chan_update {
- if code == 0x1000 | 11 || code == 0x1000 | 12 {
- msg.amount_msat.write(&mut res).expect("Writes cannot fail");
- }
- else if code == 0x1000 | 13 {
- msg.cltv_expiry.write(&mut res).expect("Writes cannot fail");
- }
- else if code == 0x1000 | 20 {
- // TODO: underspecified, follow https://github.com/lightning/bolts/issues/791
- 0u16.write(&mut res).expect("Writes cannot fail");
- }
- (chan_update.serialized_length() as u16 + 2).write(&mut res).expect("Writes cannot fail");
- msgs::ChannelUpdate::TYPE.write(&mut res).expect("Writes cannot fail");
- chan_update.write(&mut res).expect("Writes cannot fail");
- } else if code & 0x1000 == 0x1000 {
- // If we're trying to return an error that requires a `channel_update` but
- // we're forwarding to a phantom or intercept "channel" (i.e. cannot
- // generate an update), just use the generic "temporary_node_failure"
- // instead.
- code = 0x2000 | 2;
- }
- return_err!(err, code, &res.0[..]);
- }
- Ok((next_hop, shared_secret, Some(next_packet_pubkey)))
+ Ok((next_hop, shared_secret, Some(next_packet_details.next_packet_pubkey)))
}
fn construct_pending_htlc_status<'a>(
match decoded_hop {
onion_utils::Hop::Receive(next_hop_data) => {
// OUR PAYMENT!
- let current_height: u32 = self.best_block.read().unwrap().height();
+ let current_height: u32 = self.best_block.read().unwrap().height;
match create_recv_pending_htlc_info(next_hop_data, shared_secret, msg.payment_hash,
msg.amount_msat, msg.cltv_expiry, None, allow_underpay, msg.skimmed_fee_msat,
current_height, self.default_configuration.accept_mpp_keysend)
/// [`PeerManager::process_events`]: crate::ln::peer_handler::PeerManager::process_events
/// [`ChannelMonitorUpdateStatus::InProgress`]: crate::chain::ChannelMonitorUpdateStatus::InProgress
pub fn send_payment_with_route(&self, route: &Route, payment_hash: PaymentHash, recipient_onion: RecipientOnionFields, payment_id: PaymentId) -> Result<(), PaymentSendFailure> {
- let best_block_height = self.best_block.read().unwrap().height();
+ let best_block_height = self.best_block.read().unwrap().height;
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
self.pending_outbound_payments
.send_payment_with_route(route, payment_hash, recipient_onion, payment_id,
/// Similar to [`ChannelManager::send_payment_with_route`], but will automatically find a route based on
/// `route_params` and retry failed payment paths based on `retry_strategy`.
pub fn send_payment(&self, payment_hash: PaymentHash, recipient_onion: RecipientOnionFields, payment_id: PaymentId, route_params: RouteParameters, retry_strategy: Retry) -> Result<(), RetryableSendFailure> {
- let best_block_height = self.best_block.read().unwrap().height();
+ let best_block_height = self.best_block.read().unwrap().height;
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
self.pending_outbound_payments
.send_payment(payment_hash, recipient_onion, payment_id, retry_strategy, route_params,
#[cfg(test)]
pub(super) fn test_send_payment_internal(&self, route: &Route, payment_hash: PaymentHash, recipient_onion: RecipientOnionFields, keysend_preimage: Option<PaymentPreimage>, payment_id: PaymentId, recv_value_msat: Option<u64>, onion_session_privs: Vec<[u8; 32]>) -> Result<(), PaymentSendFailure> {
- let best_block_height = self.best_block.read().unwrap().height();
+ let best_block_height = self.best_block.read().unwrap().height;
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
self.pending_outbound_payments.test_send_payment_internal(route, payment_hash, recipient_onion,
keysend_preimage, payment_id, recv_value_msat, onion_session_privs, &self.node_signer,
#[cfg(test)]
pub(crate) fn test_add_new_pending_payment(&self, payment_hash: PaymentHash, recipient_onion: RecipientOnionFields, payment_id: PaymentId, route: &Route) -> Result<Vec<[u8; 32]>, PaymentSendFailure> {
- let best_block_height = self.best_block.read().unwrap().height();
+ let best_block_height = self.best_block.read().unwrap().height;
self.pending_outbound_payments.test_add_new_pending_payment(payment_hash, recipient_onion, payment_id, route, None, &self.entropy_source, best_block_height)
}
}
pub(super) fn send_payment_for_bolt12_invoice(&self, invoice: &Bolt12Invoice, payment_id: PaymentId) -> Result<(), Bolt12PaymentError> {
- let best_block_height = self.best_block.read().unwrap().height();
+ let best_block_height = self.best_block.read().unwrap().height;
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
self.pending_outbound_payments
.send_payment_for_bolt12_invoice(
///
/// [`send_payment`]: Self::send_payment
pub fn send_spontaneous_payment(&self, route: &Route, payment_preimage: Option<PaymentPreimage>, recipient_onion: RecipientOnionFields, payment_id: PaymentId) -> Result<PaymentHash, PaymentSendFailure> {
- let best_block_height = self.best_block.read().unwrap().height();
+ let best_block_height = self.best_block.read().unwrap().height;
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
self.pending_outbound_payments.send_spontaneous_payment_with_route(
route, payment_preimage, recipient_onion, payment_id, &self.entropy_source,
///
/// [`PaymentParameters::for_keysend`]: crate::routing::router::PaymentParameters::for_keysend
pub fn send_spontaneous_payment_with_retry(&self, payment_preimage: Option<PaymentPreimage>, recipient_onion: RecipientOnionFields, payment_id: PaymentId, route_params: RouteParameters, retry_strategy: Retry) -> Result<PaymentHash, RetryableSendFailure> {
- let best_block_height = self.best_block.read().unwrap().height();
+ let best_block_height = self.best_block.read().unwrap().height;
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
self.pending_outbound_payments.send_spontaneous_payment(payment_preimage, recipient_onion,
payment_id, retry_strategy, route_params, &self.router, self.list_usable_channels(),
/// [`PaymentHash`] of probes based on a static secret and a random [`PaymentId`], which allows
/// us to easily discern them from real payments.
pub fn send_probe(&self, path: Path) -> Result<(PaymentHash, PaymentId), PaymentSendFailure> {
- let best_block_height = self.best_block.read().unwrap().height();
+ let best_block_height = self.best_block.read().unwrap().height;
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
self.pending_outbound_payments.send_probe(path, self.probing_cookie_secret,
&self.entropy_source, &self.node_signer, best_block_height,
ProbeSendFailure::RouteNotFound
})?;
- let mut used_liquidity_map = HashMap::with_capacity(first_hops.len());
+ let mut used_liquidity_map = hash_map_with_capacity(first_hops.len());
let mut res = Vec::new();
}));
}
{
- let height = self.best_block.read().unwrap().height();
+ let height = self.best_block.read().unwrap().height;
// Transactions are evaluated as final by network mempools if their locktime is strictly
// lower than the next block height. However, the modules constituting our Lightning
// node might not have perfect sync about their blockchain views. Thus, if the wallet
}
let outpoint = OutPoint { txid: tx.txid(), index: output_index.unwrap() };
if let Some(funding_batch_state) = funding_batch_state.as_mut() {
- funding_batch_state.push((outpoint.to_channel_id(), *counterparty_node_id, false));
+ // TODO(dual_funding): We only do batch funding for V1 channels at the moment, but we'll probably
+ // need to fix this somehow to not rely on using the outpoint for the channel ID if we
+ // want to support V2 batching here as well.
+ funding_batch_state.push((ChannelId::v1_from_funding_outpoint(outpoint), *counterparty_node_id, false));
}
Ok(outpoint)
})
.ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id) })?;
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
+
for channel_id in channel_ids {
if !peer_state.has_channel(channel_id) {
return Err(APIError::ChannelUnavailable {
}
if let ChannelPhase::Funded(channel) = channel_phase {
if let Ok(msg) = self.get_channel_update_for_broadcast(channel) {
- peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { msg });
+ let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap();
+ pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate { msg });
} else if let Ok(msg) = self.get_channel_update_for_unicast(channel) {
peer_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
node_id: channel.context.get_counterparty_node_id(),
let mut per_source_pending_forward = [(
payment.prev_short_channel_id,
payment.prev_funding_outpoint,
+ payment.prev_channel_id,
payment.prev_user_channel_id,
vec![(pending_htlc_info, payment.prev_htlc_id)]
)];
short_channel_id: payment.prev_short_channel_id,
user_channel_id: Some(payment.prev_user_channel_id),
outpoint: payment.prev_funding_outpoint,
+ channel_id: payment.prev_channel_id,
htlc_id: payment.prev_htlc_id,
incoming_packet_shared_secret: payment.forward_info.incoming_shared_secret,
phantom_shared_secret: None,
Ok(())
}
+	/// Decodes and processes the HTLCs queued in `self.decode_update_add_htlcs`, batching
+	/// successful forwards into `self.forward_htlcs` and queueing failures back on their
+	/// incoming channels.
+	///
+	/// For each incoming-SCID batch: the onion is decoded, the HTLC is checked against
+	/// the incoming channel (`Channel::can_accept_incoming_htlc`) and, for forwards,
+	/// against the outgoing channel (`Self::can_forward_htlc`), and finally the pending
+	/// HTLC status is constructed. Failures become `HTLCForwardInfo` fail entries plus
+	/// `Event::HTLCHandlingFailed` events; batches whose incoming channel has gone away
+	/// are skipped, to be resolved on-chain instead.
+	fn process_pending_update_add_htlcs(&self) {
+		// Swap the entire pending map out so we don't hold the lock while processing.
+		let mut decode_update_add_htlcs = new_hash_map();
+		mem::swap(&mut decode_update_add_htlcs, &mut self.decode_update_add_htlcs.lock().unwrap());
+
+		// Maps a failed HTLC to the destination we report in `Event::HTLCHandlingFailed`.
+		let get_failed_htlc_destination = |outgoing_scid_opt: Option<u64>, payment_hash: PaymentHash| {
+			if let Some(outgoing_scid) = outgoing_scid_opt {
+				match self.short_to_chan_info.read().unwrap().get(&outgoing_scid) {
+					Some((outgoing_counterparty_node_id, outgoing_channel_id)) =>
+						HTLCDestination::NextHopChannel {
+							node_id: Some(*outgoing_counterparty_node_id),
+							channel_id: *outgoing_channel_id,
+						},
+					None => HTLCDestination::UnknownNextHop {
+						requested_forward_scid: outgoing_scid,
+					},
+				}
+			} else {
+				HTLCDestination::FailedPayment { payment_hash }
+			}
+		};
+
+		'outer_loop: for (incoming_scid, update_add_htlcs) in decode_update_add_htlcs {
+			// Snapshot the incoming channel's identifiers/config up front; the channel
+			// lock is not held across the per-HTLC processing below.
+			let incoming_channel_details_opt = self.do_funded_channel_callback(incoming_scid, |chan: &mut Channel<SP>| {
+				let counterparty_node_id = chan.context.get_counterparty_node_id();
+				let channel_id = chan.context.channel_id();
+				let funding_txo = chan.context.get_funding_txo().unwrap();
+				let user_channel_id = chan.context.get_user_id();
+				let accept_underpaying_htlcs = chan.context.config().accept_underpaying_htlcs;
+				(counterparty_node_id, channel_id, funding_txo, user_channel_id, accept_underpaying_htlcs)
+			});
+			let (
+				incoming_counterparty_node_id, incoming_channel_id, incoming_funding_txo,
+				incoming_user_channel_id, incoming_accept_underpaying_htlcs
+			) = if let Some(incoming_channel_details) = incoming_channel_details_opt {
+				incoming_channel_details
+			} else {
+				// The incoming channel no longer exists, HTLCs should be resolved onchain instead.
+				continue;
+			};
+
+			let mut htlc_forwards = Vec::new();
+			let mut htlc_fails = Vec::new();
+			for update_add_htlc in &update_add_htlcs {
+				let (next_hop, shared_secret, next_packet_details_opt) = match decode_incoming_update_add_htlc_onion(
+					&update_add_htlc, &self.node_signer, &self.logger, &self.secp_ctx
+				) {
+					Ok(decoded_onion) => decoded_onion,
+					Err(htlc_fail) => {
+						htlc_fails.push((htlc_fail, HTLCDestination::InvalidOnion));
+						continue;
+					},
+				};
+
+				let is_intro_node_blinded_forward = next_hop.is_intro_node_blinded_forward();
+				let outgoing_scid_opt = next_packet_details_opt.as_ref().map(|d| d.outgoing_scid);
+
+				// Process the HTLC on the incoming channel.
+				match self.do_funded_channel_callback(incoming_scid, |chan: &mut Channel<SP>| {
+					let logger = WithChannelContext::from(&self.logger, &chan.context);
+					chan.can_accept_incoming_htlc(
+						update_add_htlc, &self.fee_estimator, &logger,
+					)
+				}) {
+					Some(Ok(_)) => {},
+					Some(Err((err, code))) => {
+						let outgoing_chan_update_opt = if let Some(outgoing_scid) = outgoing_scid_opt.as_ref() {
+							self.do_funded_channel_callback(*outgoing_scid, |chan: &mut Channel<SP>| {
+								self.get_channel_update_for_onion(*outgoing_scid, chan).ok()
+							}).flatten()
+						} else {
+							None
+						};
+						let htlc_fail = self.htlc_failure_from_update_add_err(
+							&update_add_htlc, &incoming_counterparty_node_id, err, code,
+							outgoing_chan_update_opt, is_intro_node_blinded_forward, &shared_secret,
+						);
+						let htlc_destination = get_failed_htlc_destination(outgoing_scid_opt, update_add_htlc.payment_hash);
+						htlc_fails.push((htlc_fail, htlc_destination));
+						continue;
+					},
+					// The incoming channel no longer exists, HTLCs should be resolved onchain instead.
+					None => continue 'outer_loop,
+				}
+
+				// Now process the HTLC on the outgoing channel if it's a forward.
+				if let Some(next_packet_details) = next_packet_details_opt.as_ref() {
+					if let Err((err, code, chan_update_opt)) = self.can_forward_htlc(
+						&update_add_htlc, next_packet_details
+					) {
+						let htlc_fail = self.htlc_failure_from_update_add_err(
+							&update_add_htlc, &incoming_counterparty_node_id, err, code,
+							chan_update_opt, is_intro_node_blinded_forward, &shared_secret,
+						);
+						let htlc_destination = get_failed_htlc_destination(outgoing_scid_opt, update_add_htlc.payment_hash);
+						htlc_fails.push((htlc_fail, htlc_destination));
+						continue;
+					}
+				}
+
+				match self.construct_pending_htlc_status(
+					&update_add_htlc, &incoming_counterparty_node_id, shared_secret, next_hop,
+					incoming_accept_underpaying_htlcs, next_packet_details_opt.map(|d| d.next_packet_pubkey),
+				) {
+					PendingHTLCStatus::Forward(htlc_forward) => {
+						htlc_forwards.push((htlc_forward, update_add_htlc.htlc_id));
+					},
+					PendingHTLCStatus::Fail(htlc_fail) => {
+						let htlc_destination = get_failed_htlc_destination(outgoing_scid_opt, update_add_htlc.payment_hash);
+						htlc_fails.push((htlc_fail, htlc_destination));
+					},
+				}
+			}
+
+			// Process all of the forwards and failures for the channel in which the HTLCs were
+			// proposed to as a batch.
+			let pending_forwards = (incoming_scid, incoming_funding_txo, incoming_channel_id,
+				incoming_user_channel_id, htlc_forwards.drain(..).collect());
+			self.forward_htlcs_without_forward_event(&mut [pending_forwards]);
+			for (htlc_fail, htlc_destination) in htlc_fails.drain(..) {
+				let failure = match htlc_fail {
+					HTLCFailureMsg::Relay(fail_htlc) => HTLCForwardInfo::FailHTLC {
+						htlc_id: fail_htlc.htlc_id,
+						err_packet: fail_htlc.reason,
+					},
+					HTLCFailureMsg::Malformed(fail_malformed_htlc) => HTLCForwardInfo::FailMalformedHTLC {
+						htlc_id: fail_malformed_htlc.htlc_id,
+						sha256_of_onion: fail_malformed_htlc.sha256_of_onion,
+						failure_code: fail_malformed_htlc.failure_code,
+					},
+				};
+				self.forward_htlcs.lock().unwrap().entry(incoming_scid).or_insert(vec![]).push(failure);
+				self.pending_events.lock().unwrap().push_back((events::Event::HTLCHandlingFailed {
+					prev_channel_id: incoming_channel_id,
+					failed_next_destination: htlc_destination,
+				}, None));
+			}
+		}
+	}
+
/// Processes HTLCs which are pending waiting on random forward delay.
///
/// Should only really ever be called in response to a PendingHTLCsForwardable event.
pub fn process_pending_htlc_forwards(&self) {
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
+ self.process_pending_update_add_htlcs();
+
let mut new_events = VecDeque::new();
let mut failed_forwards = Vec::new();
- let mut phantom_receives: Vec<(u64, OutPoint, u128, Vec<(PendingHTLCInfo, u64)>)> = Vec::new();
+ let mut phantom_receives: Vec<(u64, OutPoint, ChannelId, u128, Vec<(PendingHTLCInfo, u64)>)> = Vec::new();
{
- let mut forward_htlcs = HashMap::new();
+ let mut forward_htlcs = new_hash_map();
mem::swap(&mut forward_htlcs, &mut self.forward_htlcs.lock().unwrap());
for (short_chan_id, mut pending_forwards) in forward_htlcs {
for forward_info in pending_forwards.drain(..) {
match forward_info {
HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
- prev_short_channel_id, prev_htlc_id, prev_funding_outpoint, prev_user_channel_id,
- forward_info: PendingHTLCInfo {
+ prev_short_channel_id, prev_htlc_id, prev_channel_id, prev_funding_outpoint,
+ prev_user_channel_id, forward_info: PendingHTLCInfo {
routing, incoming_shared_secret, payment_hash, outgoing_amt_msat,
outgoing_cltv_value, ..
}
}) => {
macro_rules! failure_handler {
($msg: expr, $err_code: expr, $err_data: expr, $phantom_ss: expr, $next_hop_unknown: expr) => {
- let logger = WithContext::from(&self.logger, forwarding_counterparty, Some(prev_funding_outpoint.to_channel_id()));
+ let logger = WithContext::from(&self.logger, forwarding_counterparty, Some(prev_channel_id));
log_info!(logger, "Failed to accept/forward incoming HTLC: {}", $msg);
let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
short_channel_id: prev_short_channel_id,
user_channel_id: Some(prev_user_channel_id),
+ channel_id: prev_channel_id,
outpoint: prev_funding_outpoint,
htlc_id: prev_htlc_id,
incoming_packet_shared_secret: incoming_shared_secret,
};
match next_hop {
onion_utils::Hop::Receive(hop_data) => {
- let current_height: u32 = self.best_block.read().unwrap().height();
+ let current_height: u32 = self.best_block.read().unwrap().height;
match create_recv_pending_htlc_info(hop_data,
incoming_shared_secret, payment_hash, outgoing_amt_msat,
outgoing_cltv_value, Some(phantom_shared_secret), false, None,
current_height, self.default_configuration.accept_mpp_keysend)
{
- Ok(info) => phantom_receives.push((prev_short_channel_id, prev_funding_outpoint, prev_user_channel_id, vec![(info, prev_htlc_id)])),
+ Ok(info) => phantom_receives.push((prev_short_channel_id, prev_funding_outpoint, prev_channel_id, prev_user_channel_id, vec![(info, prev_htlc_id)])),
Err(InboundHTLCErr { err_code, err_data, msg }) => failed_payment!(msg, err_code, err_data, Some(phantom_shared_secret))
}
},
for forward_info in pending_forwards.drain(..) {
let queue_fail_htlc_res = match forward_info {
HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
- prev_short_channel_id, prev_htlc_id, prev_funding_outpoint, prev_user_channel_id,
- forward_info: PendingHTLCInfo {
+ prev_short_channel_id, prev_htlc_id, prev_channel_id, prev_funding_outpoint,
+ prev_user_channel_id, forward_info: PendingHTLCInfo {
incoming_shared_secret, payment_hash, outgoing_amt_msat, outgoing_cltv_value,
routing: PendingHTLCRouting::Forward {
onion_packet, blinded, ..
let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
short_channel_id: prev_short_channel_id,
user_channel_id: Some(prev_user_channel_id),
+ channel_id: prev_channel_id,
outpoint: prev_funding_outpoint,
htlc_id: prev_htlc_id,
incoming_packet_shared_secret: incoming_shared_secret,
'next_forwardable_htlc: for forward_info in pending_forwards.drain(..) {
match forward_info {
HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
- prev_short_channel_id, prev_htlc_id, prev_funding_outpoint, prev_user_channel_id,
- forward_info: PendingHTLCInfo {
+ prev_short_channel_id, prev_htlc_id, prev_channel_id, prev_funding_outpoint,
+ prev_user_channel_id, forward_info: PendingHTLCInfo {
routing, incoming_shared_secret, payment_hash, incoming_amt_msat, outgoing_amt_msat,
skimmed_fee_msat, ..
}
let blinded_failure = routing.blinded_failure();
let (cltv_expiry, onion_payload, payment_data, phantom_shared_secret, mut onion_fields) = match routing {
PendingHTLCRouting::Receive {
- payment_data, payment_metadata, incoming_cltv_expiry, phantom_shared_secret,
- custom_tlvs, requires_blinded_error: _
+ payment_data, payment_metadata, payment_context,
+ incoming_cltv_expiry, phantom_shared_secret, custom_tlvs,
+ requires_blinded_error: _
} => {
let _legacy_hop_data = Some(payment_data.clone());
let onion_fields = RecipientOnionFields { payment_secret: Some(payment_data.payment_secret),
payment_metadata, custom_tlvs };
- (incoming_cltv_expiry, OnionPayload::Invoice { _legacy_hop_data },
+ (incoming_cltv_expiry, OnionPayload::Invoice { _legacy_hop_data, payment_context },
Some(payment_data), phantom_shared_secret, onion_fields)
},
- PendingHTLCRouting::ReceiveKeysend { payment_data, payment_preimage, payment_metadata, incoming_cltv_expiry, custom_tlvs } => {
+ PendingHTLCRouting::ReceiveKeysend {
+ payment_data, payment_preimage, payment_metadata,
+ incoming_cltv_expiry, custom_tlvs, requires_blinded_error: _
+ } => {
let onion_fields = RecipientOnionFields {
payment_secret: payment_data.as_ref().map(|data| data.payment_secret),
payment_metadata,
prev_hop: HTLCPreviousHopData {
short_channel_id: prev_short_channel_id,
user_channel_id: Some(prev_user_channel_id),
+ channel_id: prev_channel_id,
outpoint: prev_funding_outpoint,
htlc_id: prev_htlc_id,
incoming_packet_shared_secret: incoming_shared_secret,
debug_assert!(!committed_to_claimable);
let mut htlc_msat_height_data = $htlc.value.to_be_bytes().to_vec();
htlc_msat_height_data.extend_from_slice(
- &self.best_block.read().unwrap().height().to_be_bytes(),
+ &self.best_block.read().unwrap().height.to_be_bytes(),
);
failed_forwards.push((HTLCSource::PreviousHopData(HTLCPreviousHopData {
short_channel_id: $htlc.prev_hop.short_channel_id,
user_channel_id: $htlc.prev_hop.user_channel_id,
+ channel_id: prev_channel_id,
outpoint: prev_funding_outpoint,
htlc_id: $htlc.prev_hop.htlc_id,
incoming_packet_shared_secret: $htlc.prev_hop.incoming_packet_shared_secret,
macro_rules! check_total_value {
($purpose: expr) => {{
let mut payment_claimable_generated = false;
- let is_keysend = match $purpose {
- events::PaymentPurpose::SpontaneousPayment(_) => true,
- events::PaymentPurpose::InvoicePayment { .. } => false,
- };
+ let is_keysend = $purpose.is_keysend();
let mut claimable_payments = self.claimable_payments.lock().unwrap();
if claimable_payments.pending_claiming_payments.contains_key(&payment_hash) {
fail_htlc!(claimable_htlc, payment_hash);
#[allow(unused_assignments)] {
committed_to_claimable = true;
}
- let prev_channel_id = prev_funding_outpoint.to_channel_id();
htlcs.push(claimable_htlc);
let amount_msat = htlcs.iter().map(|htlc| htlc.value).sum();
htlcs.iter_mut().for_each(|htlc| htlc.total_value_received = Some(amount_msat));
match payment_secrets.entry(payment_hash) {
hash_map::Entry::Vacant(_) => {
match claimable_htlc.onion_payload {
- OnionPayload::Invoice { .. } => {
+ OnionPayload::Invoice { ref payment_context, .. } => {
let payment_data = payment_data.unwrap();
let (payment_preimage, min_final_cltv_expiry_delta) = match inbound_payment::verify(payment_hash, &payment_data, self.highest_seen_timestamp.load(Ordering::Acquire) as u64, &self.inbound_payment_key, &self.logger) {
Ok(result) => result,
}
};
if let Some(min_final_cltv_expiry_delta) = min_final_cltv_expiry_delta {
- let expected_min_expiry_height = (self.current_best_block().height() + min_final_cltv_expiry_delta as u32) as u64;
+ let expected_min_expiry_height = (self.current_best_block().height + min_final_cltv_expiry_delta as u32) as u64;
if (cltv_expiry as u64) < expected_min_expiry_height {
log_trace!(self.logger, "Failing new HTLC with payment_hash {} as its CLTV expiry was too soon (had {}, earliest expected {})",
&payment_hash, cltv_expiry, expected_min_expiry_height);
fail_htlc!(claimable_htlc, payment_hash);
}
}
- let purpose = events::PaymentPurpose::InvoicePayment {
- payment_preimage: payment_preimage.clone(),
- payment_secret: payment_data.payment_secret,
- };
+ let purpose = events::PaymentPurpose::from_parts(
+ payment_preimage.clone(),
+ payment_data.payment_secret,
+ payment_context.clone(),
+ );
check_total_value!(purpose);
},
OnionPayload::Spontaneous(preimage) => {
}
},
hash_map::Entry::Occupied(inbound_payment) => {
- if let OnionPayload::Spontaneous(_) = claimable_htlc.onion_payload {
- log_trace!(self.logger, "Failing new keysend HTLC with payment_hash {} because we already have an inbound payment with the same payment hash", &payment_hash);
- fail_htlc!(claimable_htlc, payment_hash);
- }
+ let payment_context = match claimable_htlc.onion_payload {
+ OnionPayload::Spontaneous(_) => {
+ log_trace!(self.logger, "Failing new keysend HTLC with payment_hash {} because we already have an inbound payment with the same payment hash", &payment_hash);
+ fail_htlc!(claimable_htlc, payment_hash);
+ },
+ OnionPayload::Invoice { ref payment_context, .. } => payment_context,
+ };
let payment_data = payment_data.unwrap();
if inbound_payment.get().payment_secret != payment_data.payment_secret {
log_trace!(self.logger, "Failing new HTLC with payment_hash {} as it didn't match our expected payment secret.", &payment_hash);
&payment_hash, payment_data.total_msat, inbound_payment.get().min_value_msat.unwrap());
fail_htlc!(claimable_htlc, payment_hash);
} else {
- let purpose = events::PaymentPurpose::InvoicePayment {
- payment_preimage: inbound_payment.get().payment_preimage,
- payment_secret: payment_data.payment_secret,
- };
+ let purpose = events::PaymentPurpose::from_parts(
+ inbound_payment.get().payment_preimage,
+ payment_data.payment_secret,
+ payment_context.clone(),
+ );
let payment_claimable_generated = check_total_value!(purpose);
if payment_claimable_generated {
inbound_payment.remove_entry();
}
}
- let best_block_height = self.best_block.read().unwrap().height();
+ let best_block_height = self.best_block.read().unwrap().height;
self.pending_outbound_payments.check_retry_payments(&self.router, || self.list_usable_channels(),
|| self.compute_inflight_htlcs(), &self.entropy_source, &self.node_signer, best_block_height,
&self.pending_events, &self.logger, |args| self.send_payment_along_path(args));
for event in background_events.drain(..) {
match event {
- BackgroundEvent::ClosedMonitorUpdateRegeneratedOnStartup((funding_txo, update)) => {
+ BackgroundEvent::ClosedMonitorUpdateRegeneratedOnStartup((funding_txo, _channel_id, update)) => {
// The channel has already been closed, so no use bothering to care about the
 // monitor update completing.
let _ = self.chain_monitor.update_channel(funding_txo, &update);
},
- BackgroundEvent::MonitorUpdateRegeneratedOnStartup { counterparty_node_id, funding_txo, update } => {
+ BackgroundEvent::MonitorUpdateRegeneratedOnStartup { counterparty_node_id, funding_txo, channel_id, update } => {
let mut updated_chan = false;
{
let per_peer_state = self.per_peer_state.read().unwrap();
if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
- match peer_state.channel_by_id.entry(funding_txo.to_channel_id()) {
+ match peer_state.channel_by_id.entry(channel_id) {
hash_map::Entry::Occupied(mut chan_phase) => {
if let ChannelPhase::Funded(chan) = chan_phase.get_mut() {
updated_chan = true;
// If the feerate has decreased by less than half, don't bother
if new_feerate <= chan.context.get_feerate_sat_per_1000_weight() && new_feerate * 2 > chan.context.get_feerate_sat_per_1000_weight() {
- if new_feerate != chan.context.get_feerate_sat_per_1000_weight() {
- log_trace!(logger, "Channel {} does not qualify for a feerate change from {} to {}.",
- chan_id, chan.context.get_feerate_sat_per_1000_weight(), new_feerate);
- }
return NotifyOption::SkipPersistNoEvents;
}
if !chan.context.is_live() {
if n >= DISABLE_GOSSIP_TICKS {
chan.set_channel_update_status(ChannelUpdateStatus::Disabled);
if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
- pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+ let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap();
+ pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate {
msg: update
});
}
if n >= ENABLE_GOSSIP_TICKS {
chan.set_channel_update_status(ChannelUpdateStatus::Enabled);
if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
- pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+ let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap();
+ pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate {
msg: update
});
}
process_unfunded_channel_tick(chan_id, &mut chan.context, &mut chan.unfunded_context,
pending_msg_events, counterparty_node_id)
},
+ #[cfg(any(dual_funding, splicing))]
+ ChannelPhase::UnfundedInboundV2(chan) => {
+ process_unfunded_channel_tick(chan_id, &mut chan.context, &mut chan.unfunded_context,
+ pending_msg_events, counterparty_node_id)
+ },
+ #[cfg(any(dual_funding, splicing))]
+ ChannelPhase::UnfundedOutboundV2(chan) => {
+ process_unfunded_channel_tick(chan_id, &mut chan.context, &mut chan.unfunded_context,
+ pending_msg_events, counterparty_node_id)
+ },
}
});
FailureCode::RequiredNodeFeatureMissing => HTLCFailReason::from_failure_code(failure_code.into()),
FailureCode::IncorrectOrUnknownPaymentDetails => {
let mut htlc_msat_height_data = htlc.value.to_be_bytes().to_vec();
- htlc_msat_height_data.extend_from_slice(&self.best_block.read().unwrap().height().to_be_bytes());
+ htlc_msat_height_data.extend_from_slice(&self.best_block.read().unwrap().height.to_be_bytes());
HTLCFailReason::reason(failure_code.into(), htlc_msat_height_data)
},
FailureCode::InvalidOnionPayload(data) => {
}
}
+ /// Fails an HTLC backwards to the sender of it to us.
+ ///
+ /// Thin wrapper around `fail_htlc_backwards_internal_without_forward_event`,
+ /// which performs the actual failure handling; this wrapper additionally
+ /// generates a pending-HTLC-forwards event when the inner call reports that
+ /// one is now needed.
+ fn fail_htlc_backwards_internal(&self, source: &HTLCSource, payment_hash: &PaymentHash, onion_error: &HTLCFailReason, destination: HTLCDestination) {
+ let push_forward_event = self.fail_htlc_backwards_internal_without_forward_event(source, payment_hash, onion_error, destination);
+ // Only push an event if the inner call indicated the failure was queued
+ // into previously-empty queues and thus won't otherwise be processed.
+ if push_forward_event { self.push_pending_forwards_ev(); }
+ }
+
/// Fails an HTLC backwards to the sender of it to us.
/// Note that we do not assume that channels corresponding to failed HTLCs are still available.
- fn fail_htlc_backwards_internal(&self, source: &HTLCSource, payment_hash: &PaymentHash, onion_error: &HTLCFailReason, destination: HTLCDestination) {
+ fn fail_htlc_backwards_internal_without_forward_event(&self, source: &HTLCSource, payment_hash: &PaymentHash, onion_error: &HTLCFailReason, destination: HTLCDestination) -> bool {
// Ensure that no peer state channel storage lock is held when calling this function.
// This ensures that future code doesn't introduce a lock-order requirement for
// `forward_htlcs` to be locked after the `per_peer_state` peer locks, which calling
// Note that we MUST NOT end up calling methods on self.chain_monitor here - we're called
// from block_connected which may run during initialization prior to the chain_monitor
// being fully configured. See the docs for `ChannelManagerReadArgs` for more.
+ let mut push_forward_event;
match source {
HTLCSource::OutboundRoute { ref path, ref session_priv, ref payment_id, .. } => {
- if self.pending_outbound_payments.fail_htlc(source, payment_hash, onion_error, path,
+ push_forward_event = self.pending_outbound_payments.fail_htlc(source, payment_hash, onion_error, path,
session_priv, payment_id, self.probing_cookie_secret, &self.secp_ctx,
- &self.pending_events, &self.logger)
- { self.push_pending_forwards_ev(); }
+ &self.pending_events, &self.logger);
},
HTLCSource::PreviousHopData(HTLCPreviousHopData {
ref short_channel_id, ref htlc_id, ref incoming_packet_shared_secret,
- ref phantom_shared_secret, ref outpoint, ref blinded_failure, ..
+ ref phantom_shared_secret, outpoint: _, ref blinded_failure, ref channel_id, ..
}) => {
log_trace!(
- WithContext::from(&self.logger, None, Some(outpoint.to_channel_id())),
+ WithContext::from(&self.logger, None, Some(*channel_id)),
"Failing {}HTLC with payment_hash {} backwards from us: {:?}",
if blinded_failure.is_some() { "blinded " } else { "" }, &payment_hash, onion_error
);
}
};
- let mut push_forward_ev = false;
+ push_forward_event = self.decode_update_add_htlcs.lock().unwrap().is_empty();
let mut forward_htlcs = self.forward_htlcs.lock().unwrap();
- if forward_htlcs.is_empty() {
- push_forward_ev = true;
- }
+ push_forward_event &= forward_htlcs.is_empty();
match forward_htlcs.entry(*short_channel_id) {
hash_map::Entry::Occupied(mut entry) => {
entry.get_mut().push(failure);
}
}
mem::drop(forward_htlcs);
- if push_forward_ev { self.push_pending_forwards_ev(); }
let mut pending_events = self.pending_events.lock().unwrap();
pending_events.push_back((events::Event::HTLCHandlingFailed {
- prev_channel_id: outpoint.to_channel_id(),
+ prev_channel_id: *channel_id,
failed_next_destination: destination,
}, None));
},
}
+ push_forward_event
}
/// Provides a payment preimage in response to [`Event::PaymentClaimable`], generating any
}
if valid_mpp {
for htlc in sources.drain(..) {
- let prev_hop_chan_id = htlc.prev_hop.outpoint.to_channel_id();
+ let prev_hop_chan_id = htlc.prev_hop.channel_id;
if let Err((pk, err)) = self.claim_funds_from_hop(
htlc.prev_hop, payment_preimage,
|_, definitely_duplicate| {
if !valid_mpp {
for htlc in sources.drain(..) {
let mut htlc_msat_height_data = htlc.value.to_be_bytes().to_vec();
- htlc_msat_height_data.extend_from_slice(&self.best_block.read().unwrap().height().to_be_bytes());
+ htlc_msat_height_data.extend_from_slice(&self.best_block.read().unwrap().height.to_be_bytes());
let source = HTLCSource::PreviousHopData(htlc.prev_hop);
let reason = HTLCFailReason::reason(0x4000 | 15, htlc_msat_height_data);
let receiver = HTLCDestination::FailedPayment { payment_hash };
{
let per_peer_state = self.per_peer_state.read().unwrap();
- let chan_id = prev_hop.outpoint.to_channel_id();
+ let chan_id = prev_hop.channel_id;
let counterparty_node_id_opt = match self.short_to_chan_info.read().unwrap().get(&prev_hop.short_channel_id) {
Some((cp_id, _dup_chan_id)) => Some(cp_id.clone()),
None => None
BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
counterparty_node_id,
funding_txo: prev_hop.outpoint,
+ channel_id: prev_hop.channel_id,
update: monitor_update.clone(),
});
}
log_trace!(logger, "Completing monitor update completion action for channel {} as claim was redundant: {:?}",
chan_id, action);
- let (node_id, funding_outpoint, blocker) =
+ let (node_id, _funding_outpoint, channel_id, blocker) =
if let MonitorUpdateCompletionAction::FreeOtherChannelImmediately {
downstream_counterparty_node_id: node_id,
downstream_funding_outpoint: funding_outpoint,
- blocking_action: blocker,
+ blocking_action: blocker, downstream_channel_id: channel_id,
} = action {
- (node_id, funding_outpoint, blocker)
+ (node_id, funding_outpoint, channel_id, blocker)
} else {
debug_assert!(false,
"Duplicate claims should always free another channel immediately");
let mut peer_state = peer_state_mtx.lock().unwrap();
if let Some(blockers) = peer_state
.actions_blocking_raa_monitor_updates
- .get_mut(&funding_outpoint.to_channel_id())
+ .get_mut(&channel_id)
{
let mut found_blocker = false;
blockers.retain(|iter| {
updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
payment_preimage,
}],
+ channel_id: Some(prev_hop.channel_id),
};
if !during_init {
// with a preimage we *must* somehow manage to propagate it to the upstream
// channel, or we must have an ability to receive the same event and try
// again on restart.
- log_error!(WithContext::from(&self.logger, None, Some(prev_hop.outpoint.to_channel_id())), "Critical error: failed to update channel monitor with preimage {:?}: {:?}",
+ log_error!(WithContext::from(&self.logger, None, Some(prev_hop.channel_id)),
+ "Critical error: failed to update channel monitor with preimage {:?}: {:?}",
payment_preimage, update_res);
}
} else {
// complete the monitor update completion action from `completion_action`.
self.pending_background_events.lock().unwrap().push(
BackgroundEvent::ClosedMonitorUpdateRegeneratedOnStartup((
- prev_hop.outpoint, preimage_update,
+ prev_hop.outpoint, prev_hop.channel_id, preimage_update,
)));
}
// Note that we do process the completion action here. This totally could be a
}
fn claim_funds_internal(&self, source: HTLCSource, payment_preimage: PaymentPreimage,
- forwarded_htlc_value_msat: Option<u64>, from_onchain: bool, startup_replay: bool,
- next_channel_counterparty_node_id: Option<PublicKey>, next_channel_outpoint: OutPoint
+ forwarded_htlc_value_msat: Option<u64>, skimmed_fee_msat: Option<u64>, from_onchain: bool,
+ startup_replay: bool, next_channel_counterparty_node_id: Option<PublicKey>,
+ next_channel_outpoint: OutPoint, next_channel_id: ChannelId, next_user_channel_id: Option<u128>,
) {
match source {
HTLCSource::OutboundRoute { session_priv, payment_id, path, .. } => {
debug_assert_eq!(pubkey, path.hops[0].pubkey);
}
let ev_completion_action = EventCompletionAction::ReleaseRAAChannelMonitorUpdate {
- channel_funding_outpoint: next_channel_outpoint,
+ channel_funding_outpoint: next_channel_outpoint, channel_id: next_channel_id,
counterparty_node_id: path.hops[0].pubkey,
};
self.pending_outbound_payments.claim_htlc(payment_id, payment_preimage,
&self.logger);
},
HTLCSource::PreviousHopData(hop_data) => {
- let prev_outpoint = hop_data.outpoint;
+ let prev_channel_id = hop_data.channel_id;
+ let prev_user_channel_id = hop_data.user_channel_id;
let completed_blocker = RAAMonitorUpdateBlockingAction::from_prev_hop_data(&hop_data);
#[cfg(debug_assertions)]
let claiming_chan_funding_outpoint = hop_data.outpoint;
|htlc_claim_value_msat, definitely_duplicate| {
let chan_to_release =
if let Some(node_id) = next_channel_counterparty_node_id {
- Some((node_id, next_channel_outpoint, completed_blocker))
+ Some((node_id, next_channel_outpoint, next_channel_id, completed_blocker))
} else {
// We can only get `None` here if we are processing a
// `ChannelMonitor`-originated event, in which case we
},
// or the channel we'd unblock is already closed,
BackgroundEvent::ClosedMonitorUpdateRegeneratedOnStartup(
- (funding_txo, monitor_update)
+ (funding_txo, _channel_id, monitor_update)
) => {
if *funding_txo == next_channel_outpoint {
assert_eq!(monitor_update.updates.len(), 1);
BackgroundEvent::MonitorUpdatesComplete {
channel_id, ..
} =>
- *channel_id == claiming_chan_funding_outpoint.to_channel_id(),
+ *channel_id == prev_channel_id,
}
}), "{:?}", *background_events);
}
Some(MonitorUpdateCompletionAction::FreeOtherChannelImmediately {
downstream_counterparty_node_id: other_chan.0,
downstream_funding_outpoint: other_chan.1,
- blocking_action: other_chan.2,
+ downstream_channel_id: other_chan.2,
+ blocking_action: other_chan.3,
})
} else { None }
} else {
- let fee_earned_msat = if let Some(forwarded_htlc_value) = forwarded_htlc_value_msat {
+ let total_fee_earned_msat = if let Some(forwarded_htlc_value) = forwarded_htlc_value_msat {
if let Some(claimed_htlc_value) = htlc_claim_value_msat {
Some(claimed_htlc_value - forwarded_htlc_value)
} else { None }
} else { None };
+ debug_assert!(skimmed_fee_msat <= total_fee_earned_msat,
+ "skimmed_fee_msat must always be included in total_fee_earned_msat");
Some(MonitorUpdateCompletionAction::EmitEventAndFreeOtherChannel {
event: events::Event::PaymentForwarded {
- fee_earned_msat,
+ prev_channel_id: Some(prev_channel_id),
+ next_channel_id: Some(next_channel_id),
+ prev_user_channel_id,
+ next_user_channel_id,
+ total_fee_earned_msat,
+ skimmed_fee_msat,
claim_from_onchain_tx: from_onchain,
- prev_channel_id: Some(prev_outpoint.to_channel_id()),
- next_channel_id: Some(next_channel_outpoint.to_channel_id()),
outbound_amount_forwarded_msat: forwarded_htlc_value_msat,
},
downstream_counterparty_and_funding_outpoint: chan_to_release,
event, downstream_counterparty_and_funding_outpoint
} => {
self.pending_events.lock().unwrap().push_back((event, None));
- if let Some((node_id, funding_outpoint, blocker)) = downstream_counterparty_and_funding_outpoint {
- self.handle_monitor_update_release(node_id, funding_outpoint, Some(blocker));
+ if let Some((node_id, funding_outpoint, channel_id, blocker)) = downstream_counterparty_and_funding_outpoint {
+ self.handle_monitor_update_release(node_id, funding_outpoint, channel_id, Some(blocker));
}
},
MonitorUpdateCompletionAction::FreeOtherChannelImmediately {
- downstream_counterparty_node_id, downstream_funding_outpoint, blocking_action,
+ downstream_counterparty_node_id, downstream_funding_outpoint, downstream_channel_id, blocking_action,
} => {
self.handle_monitor_update_release(
downstream_counterparty_node_id,
downstream_funding_outpoint,
+ downstream_channel_id,
Some(blocking_action),
);
},
fn handle_channel_resumption(&self, pending_msg_events: &mut Vec<MessageSendEvent>,
channel: &mut Channel<SP>, raa: Option<msgs::RevokeAndACK>,
commitment_update: Option<msgs::CommitmentUpdate>, order: RAACommitmentOrder,
- pending_forwards: Vec<(PendingHTLCInfo, u64)>, funding_broadcastable: Option<Transaction>,
+ pending_forwards: Vec<(PendingHTLCInfo, u64)>, pending_update_adds: Vec<msgs::UpdateAddHTLC>,
+ funding_broadcastable: Option<Transaction>,
channel_ready: Option<msgs::ChannelReady>, announcement_sigs: Option<msgs::AnnouncementSignatures>)
- -> Option<(u64, OutPoint, u128, Vec<(PendingHTLCInfo, u64)>)> {
+ -> (Option<(u64, OutPoint, ChannelId, u128, Vec<(PendingHTLCInfo, u64)>)>, Option<(u64, Vec<msgs::UpdateAddHTLC>)>) {
let logger = WithChannelContext::from(&self.logger, &channel.context);
- log_trace!(logger, "Handling channel resumption for channel {} with {} RAA, {} commitment update, {} pending forwards, {}broadcasting funding, {} channel ready, {} announcement",
+ log_trace!(logger, "Handling channel resumption for channel {} with {} RAA, {} commitment update, {} pending forwards, {} pending update_add_htlcs, {}broadcasting funding, {} channel ready, {} announcement",
&channel.context.channel_id(),
if raa.is_some() { "an" } else { "no" },
- if commitment_update.is_some() { "a" } else { "no" }, pending_forwards.len(),
+ if commitment_update.is_some() { "a" } else { "no" },
+ pending_forwards.len(), pending_update_adds.len(),
if funding_broadcastable.is_some() { "" } else { "not " },
if channel_ready.is_some() { "sending" } else { "without" },
if announcement_sigs.is_some() { "sending" } else { "without" });
- let mut htlc_forwards = None;
-
let counterparty_node_id = channel.context.get_counterparty_node_id();
+ let short_channel_id = channel.context.get_short_channel_id().unwrap_or(channel.context.outbound_scid_alias());
+
+ let mut htlc_forwards = None;
if !pending_forwards.is_empty() {
- htlc_forwards = Some((channel.context.get_short_channel_id().unwrap_or(channel.context.outbound_scid_alias()),
- channel.context.get_funding_txo().unwrap(), channel.context.get_user_id(), pending_forwards));
+ htlc_forwards = Some((short_channel_id, channel.context.get_funding_txo().unwrap(),
+ channel.context.channel_id(), channel.context.get_user_id(), pending_forwards));
+ }
+ let mut decode_update_add_htlcs = None;
+ if !pending_update_adds.is_empty() {
+ decode_update_add_htlcs = Some((short_channel_id, pending_update_adds));
}
if let Some(msg) = channel_ready {
emit_channel_ready_event!(pending_events, channel);
}
- htlc_forwards
+ (htlc_forwards, decode_update_add_htlcs)
}
- fn channel_monitor_updated(&self, funding_txo: &OutPoint, highest_applied_update_id: u64, counterparty_node_id: Option<&PublicKey>) {
+ fn channel_monitor_updated(&self, funding_txo: &OutPoint, channel_id: &ChannelId, highest_applied_update_id: u64, counterparty_node_id: Option<&PublicKey>) {
debug_assert!(self.total_consistency_lock.try_write().is_err()); // Caller holds read lock
let counterparty_node_id = match counterparty_node_id {
// TODO: Once we can rely on the counterparty_node_id from the
// monitor event, this and the outpoint_to_peer map should be removed.
let outpoint_to_peer = self.outpoint_to_peer.lock().unwrap();
- match outpoint_to_peer.get(&funding_txo) {
+ match outpoint_to_peer.get(funding_txo) {
Some(cp_id) => cp_id.clone(),
None => return,
}
peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
let peer_state = &mut *peer_state_lock;
let channel =
- if let Some(ChannelPhase::Funded(chan)) = peer_state.channel_by_id.get_mut(&funding_txo.to_channel_id()) {
+ if let Some(ChannelPhase::Funded(chan)) = peer_state.channel_by_id.get_mut(channel_id) {
chan
} else {
let update_actions = peer_state.monitor_update_blocked_actions
- .remove(&funding_txo.to_channel_id()).unwrap_or(Vec::new());
+ .remove(&channel_id).unwrap_or(Vec::new());
mem::drop(peer_state_lock);
mem::drop(per_peer_state);
self.handle_monitor_update_completion_actions(update_actions);
// happening and return an error. N.B. that we create channel with an outbound SCID of zero so
// that we can delay allocating the SCID until after we're sure that the checks below will
// succeed.
- let mut channel = match peer_state.inbound_channel_request_by_id.remove(temporary_channel_id) {
+ let res = match peer_state.inbound_channel_request_by_id.remove(temporary_channel_id) {
Some(unaccepted_channel) => {
- let best_block_height = self.best_block.read().unwrap().height();
+ let best_block_height = self.best_block.read().unwrap().height;
InboundV1Channel::new(&self.fee_estimator, &self.entropy_source, &self.signer_provider,
counterparty_node_id.clone(), &self.channel_type_features(), &peer_state.latest_features,
&unaccepted_channel.open_channel_msg, user_channel_id, &self.default_configuration, best_block_height,
- &self.logger, accept_0conf).map_err(|e| {
- let err_str = e.to_string();
- log_error!(logger, "{}", err_str);
-
- APIError::ChannelUnavailable { err: err_str }
- })
- }
+ &self.logger, accept_0conf).map_err(|err| MsgHandleErrInternal::from_chan_no_close(err, *temporary_channel_id))
+ },
_ => {
let err_str = "No such channel awaiting to be accepted.".to_owned();
log_error!(logger, "{}", err_str);
- Err(APIError::APIMisuseError { err: err_str })
+ return Err(APIError::APIMisuseError { err: err_str });
+ }
+ };
+
+ match res {
+ Err(err) => {
+ mem::drop(peer_state_lock);
+ mem::drop(per_peer_state);
+ match handle_error!(self, Result::<(), MsgHandleErrInternal>::Err(err), *counterparty_node_id) {
+ Ok(_) => unreachable!("`handle_error` only returns Err as we've passed in an Err"),
+ Err(e) => {
+ return Err(APIError::ChannelUnavailable { err: e.err });
+ },
+ }
}
- }?;
+ Ok(mut channel) => {
+ if accept_0conf {
+ // This should have been correctly configured by the call to InboundV1Channel::new.
+ debug_assert!(channel.context.minimum_depth().unwrap() == 0);
+ } else if channel.context.get_channel_type().requires_zero_conf() {
+ let send_msg_err_event = events::MessageSendEvent::HandleError {
+ node_id: channel.context.get_counterparty_node_id(),
+ action: msgs::ErrorAction::SendErrorMessage{
+ msg: msgs::ErrorMessage { channel_id: temporary_channel_id.clone(), data: "No zero confirmation channels accepted".to_owned(), }
+ }
+ };
+ peer_state.pending_msg_events.push(send_msg_err_event);
+ let err_str = "Please use accept_inbound_channel_from_trusted_peer_0conf to accept channels with zero confirmations.".to_owned();
+ log_error!(logger, "{}", err_str);
- if accept_0conf {
- // This should have been correctly configured by the call to InboundV1Channel::new.
- debug_assert!(channel.context.minimum_depth().unwrap() == 0);
- } else if channel.context.get_channel_type().requires_zero_conf() {
- let send_msg_err_event = events::MessageSendEvent::HandleError {
- node_id: channel.context.get_counterparty_node_id(),
- action: msgs::ErrorAction::SendErrorMessage{
- msg: msgs::ErrorMessage { channel_id: temporary_channel_id.clone(), data: "No zero confirmation channels accepted".to_owned(), }
- }
- };
- peer_state.pending_msg_events.push(send_msg_err_event);
- let err_str = "Please use accept_inbound_channel_from_trusted_peer_0conf to accept channels with zero confirmations.".to_owned();
- log_error!(logger, "{}", err_str);
+ return Err(APIError::APIMisuseError { err: err_str });
+ } else {
+ // If this peer already has some channels, a new channel won't increase our number of peers
+ // with unfunded channels, so as long as we aren't over the maximum number of unfunded
+ // channels per-peer we can accept channels from a peer with existing ones.
+ if is_only_peer_channel && peers_without_funded_channels >= MAX_UNFUNDED_CHANNEL_PEERS {
+ let send_msg_err_event = events::MessageSendEvent::HandleError {
+ node_id: channel.context.get_counterparty_node_id(),
+ action: msgs::ErrorAction::SendErrorMessage{
+ msg: msgs::ErrorMessage { channel_id: temporary_channel_id.clone(), data: "Have too many peers with unfunded channels, not accepting new ones".to_owned(), }
+ }
+ };
+ peer_state.pending_msg_events.push(send_msg_err_event);
+ let err_str = "Too many peers with unfunded channels, refusing to accept new ones".to_owned();
+ log_error!(logger, "{}", err_str);
- return Err(APIError::APIMisuseError { err: err_str });
- } else {
- // If this peer already has some channels, a new channel won't increase our number of peers
- // with unfunded channels, so as long as we aren't over the maximum number of unfunded
- // channels per-peer we can accept channels from a peer with existing ones.
- if is_only_peer_channel && peers_without_funded_channels >= MAX_UNFUNDED_CHANNEL_PEERS {
- let send_msg_err_event = events::MessageSendEvent::HandleError {
- node_id: channel.context.get_counterparty_node_id(),
- action: msgs::ErrorAction::SendErrorMessage{
- msg: msgs::ErrorMessage { channel_id: temporary_channel_id.clone(), data: "Have too many peers with unfunded channels, not accepting new ones".to_owned(), }
+ return Err(APIError::APIMisuseError { err: err_str });
}
- };
- peer_state.pending_msg_events.push(send_msg_err_event);
- let err_str = "Too many peers with unfunded channels, refusing to accept new ones".to_owned();
- log_error!(logger, "{}", err_str);
-
- return Err(APIError::APIMisuseError { err: err_str });
- }
- }
+ }
- // Now that we know we have a channel, assign an outbound SCID alias.
- let outbound_scid_alias = self.create_and_insert_outbound_scid_alias();
- channel.context.set_outbound_scid_alias(outbound_scid_alias);
+ // Now that we know we have a channel, assign an outbound SCID alias.
+ let outbound_scid_alias = self.create_and_insert_outbound_scid_alias();
+ channel.context.set_outbound_scid_alias(outbound_scid_alias);
- peer_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel {
- node_id: channel.context.get_counterparty_node_id(),
- msg: channel.accept_inbound_channel(),
- });
+ peer_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel {
+ node_id: channel.context.get_counterparty_node_id(),
+ msg: channel.accept_inbound_channel(),
+ });
- peer_state.channel_by_id.insert(temporary_channel_id.clone(), ChannelPhase::UnfundedInboundV1(channel));
+ peer_state.channel_by_id.insert(temporary_channel_id.clone(), ChannelPhase::UnfundedInboundV1(channel));
- Ok(())
+ Ok(())
+ },
+ }
}
/// Gets the number of peers which match the given filter and do not have any funded, outbound,
fn peers_without_funded_channels<Filter>(&self, maybe_count_peer: Filter) -> usize
where Filter: Fn(&PeerState<SP>) -> bool {
let mut peers_without_funded_channels = 0;
- let best_block_height = self.best_block.read().unwrap().height();
+ let best_block_height = self.best_block.read().unwrap().height;
{
let peer_state_lock = self.per_peer_state.read().unwrap();
for (_, peer_mtx) in peer_state_lock.iter() {
num_unfunded_channels += 1;
}
},
+ // TODO(dual_funding): Combine this match arm with above once #[cfg(any(dual_funding, splicing))] is removed.
+ #[cfg(any(dual_funding, splicing))]
+ ChannelPhase::UnfundedInboundV2(chan) => {
+ // Only inbound V2 channels that are not 0conf and that we do not contribute to will be
+ // included in the unfunded count.
+ if chan.context.minimum_depth().unwrap_or(1) != 0 &&
+ chan.dual_funding_context.our_funding_satoshis == 0 {
+ num_unfunded_channels += 1;
+ }
+ },
ChannelPhase::UnfundedOutboundV1(_) => {
// Outbound channels don't contribute to the unfunded count in the DoS context.
continue;
+ },
+ // TODO(dual_funding): Combine this match arm with above once #[cfg(any(dual_funding, splicing))] is removed.
+ #[cfg(any(dual_funding, splicing))]
+ ChannelPhase::UnfundedOutboundV2(_) => {
+ // Outbound channels don't contribute to the unfunded count in the DoS context.
+ continue;
}
}
}
fn internal_open_channel(&self, counterparty_node_id: &PublicKey, msg: &msgs::OpenChannel) -> Result<(), MsgHandleErrInternal> {
// Note that the ChannelManager is NOT re-persisted on disk after this, so any changes are
// likely to be lost on restart!
- if msg.chain_hash != self.chain_hash {
- return Err(MsgHandleErrInternal::send_err_msg_no_close("Unknown genesis block hash".to_owned(), msg.temporary_channel_id.clone()));
+ if msg.common_fields.chain_hash != self.chain_hash {
+ return Err(MsgHandleErrInternal::send_err_msg_no_close("Unknown genesis block hash".to_owned(),
+ msg.common_fields.temporary_channel_id.clone()));
}
if !self.default_configuration.accept_inbound_channels {
- return Err(MsgHandleErrInternal::send_err_msg_no_close("No inbound channels accepted".to_owned(), msg.temporary_channel_id.clone()));
+ return Err(MsgHandleErrInternal::send_err_msg_no_close("No inbound channels accepted".to_owned(),
+ msg.common_fields.temporary_channel_id.clone()));
}
// Get the number of peers with channels, but without funded ones. We don't care too much
let peer_state_mutex = per_peer_state.get(counterparty_node_id)
.ok_or_else(|| {
debug_assert!(false);
- MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.temporary_channel_id.clone())
+ MsgHandleErrInternal::send_err_msg_no_close(
+ format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id),
+ msg.common_fields.temporary_channel_id.clone())
})?;
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
{
return Err(MsgHandleErrInternal::send_err_msg_no_close(
"Have too many peers with unfunded channels, not accepting new ones".to_owned(),
- msg.temporary_channel_id.clone()));
+ msg.common_fields.temporary_channel_id.clone()));
}
- let best_block_height = self.best_block.read().unwrap().height();
+ let best_block_height = self.best_block.read().unwrap().height;
if Self::unfunded_channel_count(peer_state, best_block_height) >= MAX_UNFUNDED_CHANS_PER_PEER {
return Err(MsgHandleErrInternal::send_err_msg_no_close(
format!("Refusing more than {} unfunded channels.", MAX_UNFUNDED_CHANS_PER_PEER),
- msg.temporary_channel_id.clone()));
+ msg.common_fields.temporary_channel_id.clone()));
}
- let channel_id = msg.temporary_channel_id;
+ let channel_id = msg.common_fields.temporary_channel_id;
let channel_exists = peer_state.has_channel(&channel_id);
if channel_exists {
- return Err(MsgHandleErrInternal::send_err_msg_no_close("temporary_channel_id collision for the same peer!".to_owned(), msg.temporary_channel_id.clone()));
+ return Err(MsgHandleErrInternal::send_err_msg_no_close(
+ "temporary_channel_id collision for the same peer!".to_owned(),
+ msg.common_fields.temporary_channel_id.clone()));
}
// If we're doing manual acceptance checks on the channel, then defer creation until we're sure we want to accept.
if self.default_configuration.manually_accept_inbound_channels {
let channel_type = channel::channel_type_from_open_channel(
- &msg, &peer_state.latest_features, &self.channel_type_features()
+ &msg.common_fields, &peer_state.latest_features, &self.channel_type_features()
).map_err(|e|
- MsgHandleErrInternal::from_chan_no_close(e, msg.temporary_channel_id)
+ MsgHandleErrInternal::from_chan_no_close(e, msg.common_fields.temporary_channel_id)
)?;
let mut pending_events = self.pending_events.lock().unwrap();
pending_events.push_back((events::Event::OpenChannelRequest {
- temporary_channel_id: msg.temporary_channel_id.clone(),
+ temporary_channel_id: msg.common_fields.temporary_channel_id.clone(),
counterparty_node_id: counterparty_node_id.clone(),
- funding_satoshis: msg.funding_satoshis,
+ funding_satoshis: msg.common_fields.funding_satoshis,
push_msat: msg.push_msat,
channel_type,
}, None));
&self.default_configuration, best_block_height, &self.logger, /*is_0conf=*/false)
{
Err(e) => {
- return Err(MsgHandleErrInternal::from_chan_no_close(e, msg.temporary_channel_id));
+ return Err(MsgHandleErrInternal::from_chan_no_close(e, msg.common_fields.temporary_channel_id));
},
Ok(res) => res
};
let channel_type = channel.context.get_channel_type();
if channel_type.requires_zero_conf() {
- return Err(MsgHandleErrInternal::send_err_msg_no_close("No zero confirmation channels accepted".to_owned(), msg.temporary_channel_id.clone()));
+ return Err(MsgHandleErrInternal::send_err_msg_no_close(
+ "No zero confirmation channels accepted".to_owned(),
+ msg.common_fields.temporary_channel_id.clone()));
}
if channel_type.requires_anchors_zero_fee_htlc_tx() {
- return Err(MsgHandleErrInternal::send_err_msg_no_close("No channels with anchor outputs accepted".to_owned(), msg.temporary_channel_id.clone()));
+ return Err(MsgHandleErrInternal::send_err_msg_no_close(
+ "No channels with anchor outputs accepted".to_owned(),
+ msg.common_fields.temporary_channel_id.clone()));
}
let outbound_scid_alias = self.create_and_insert_outbound_scid_alias();
let peer_state_mutex = per_peer_state.get(counterparty_node_id)
.ok_or_else(|| {
debug_assert!(false);
- MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.temporary_channel_id)
+ MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.common_fields.temporary_channel_id)
})?;
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
- match peer_state.channel_by_id.entry(msg.temporary_channel_id) {
+ match peer_state.channel_by_id.entry(msg.common_fields.temporary_channel_id) {
hash_map::Entry::Occupied(mut phase) => {
match phase.get_mut() {
ChannelPhase::UnfundedOutboundV1(chan) => {
(chan.context.get_value_satoshis(), chan.context.get_funding_redeemscript().to_v0_p2wsh(), chan.context.get_user_id())
},
_ => {
- return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got an unexpected accept_channel message from peer with counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id));
+ return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got an unexpected accept_channel message from peer with counterparty_node_id {}", counterparty_node_id), msg.common_fields.temporary_channel_id));
}
}
},
- hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id))
+ hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.common_fields.temporary_channel_id))
}
};
let mut pending_events = self.pending_events.lock().unwrap();
pending_events.push_back((events::Event::FundingGenerationReady {
- temporary_channel_id: msg.temporary_channel_id,
+ temporary_channel_id: msg.common_fields.temporary_channel_id,
counterparty_node_id: *counterparty_node_id,
channel_value_satoshis: value,
output_script,
let mut chan = remove_channel_phase!(self, chan_phase_entry);
finish_shutdown = Some(chan.context_mut().force_shutdown(false, ClosureReason::CounterpartyCoopClosedUnfundedChannel));
},
+ // TODO(dual_funding): Combine this match arm with above.
+ #[cfg(any(dual_funding, splicing))]
+ ChannelPhase::UnfundedInboundV2(_) | ChannelPhase::UnfundedOutboundV2(_) => {
+ let context = phase.context_mut();
+ log_error!(self.logger, "Immediately closing unfunded channel {} as peer asked to cooperatively shut it down (which is unnecessary)", &msg.channel_id);
+ let mut chan = remove_channel_phase!(self, chan_phase_entry);
+ finish_shutdown = Some(chan.context_mut().force_shutdown(false, ClosureReason::CounterpartyCoopClosedUnfundedChannel));
+ },
}
} else {
return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
}
if let Some(ChannelPhase::Funded(chan)) = chan_option {
if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
- let mut peer_state_lock = peer_state_mutex.lock().unwrap();
- let peer_state = &mut *peer_state_lock;
- peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+ let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap();
+ pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate {
msg: update
});
}
match peer_state.channel_by_id.entry(msg.channel_id) {
hash_map::Entry::Occupied(mut chan_phase_entry) => {
if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
- let pending_forward_info = match decoded_hop_res {
+ let mut pending_forward_info = match decoded_hop_res {
Ok((next_hop, shared_secret, next_packet_pk_opt)) =>
self.construct_pending_htlc_status(
msg, counterparty_node_id, shared_secret, next_hop,
),
Err(e) => PendingHTLCStatus::Fail(e)
};
- let create_pending_htlc_status = |chan: &Channel<SP>, pending_forward_info: PendingHTLCStatus, error_code: u16| {
+ let logger = WithChannelContext::from(&self.logger, &chan.context);
+ // If the update_add is completely bogus, the call will Err and we will close,
+ // but if we've sent a shutdown and they haven't acknowledged it yet, we just
+ // want to reject the new HTLC and fail it backwards instead of forwarding.
+ if let Err((_, error_code)) = chan.can_accept_incoming_htlc(&msg, &self.fee_estimator, &logger) {
if msg.blinding_point.is_some() {
- return PendingHTLCStatus::Fail(HTLCFailureMsg::Malformed(
- msgs::UpdateFailMalformedHTLC {
- channel_id: msg.channel_id,
- htlc_id: msg.htlc_id,
- sha256_of_onion: [0; 32],
- failure_code: INVALID_ONION_BLINDING,
- }
- ))
- }
- // If the update_add is completely bogus, the call will Err and we will close,
- // but if we've sent a shutdown and they haven't acknowledged it yet, we just
- // want to reject the new HTLC and fail it backwards instead of forwarding.
- match pending_forward_info {
- PendingHTLCStatus::Forward(PendingHTLCInfo {
- ref incoming_shared_secret, ref routing, ..
- }) => {
- let reason = if routing.blinded_failure().is_some() {
- HTLCFailReason::reason(INVALID_ONION_BLINDING, vec![0; 32])
- } else if (error_code & 0x1000) != 0 {
- let (real_code, error_data) = self.get_htlc_inbound_temp_fail_err_and_data(error_code, chan);
- HTLCFailReason::reason(real_code, error_data)
- } else {
- HTLCFailReason::from_failure_code(error_code)
- }.get_encrypted_failure_packet(incoming_shared_secret, &None);
- let msg = msgs::UpdateFailHTLC {
+ pending_forward_info = PendingHTLCStatus::Fail(HTLCFailureMsg::Malformed(
+ msgs::UpdateFailMalformedHTLC {
channel_id: msg.channel_id,
htlc_id: msg.htlc_id,
- reason
- };
- PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msg))
- },
- _ => pending_forward_info
+ sha256_of_onion: [0; 32],
+ failure_code: INVALID_ONION_BLINDING,
+ }
+ ))
+ } else {
+ match pending_forward_info {
+ PendingHTLCStatus::Forward(PendingHTLCInfo {
+ ref incoming_shared_secret, ref routing, ..
+ }) => {
+ let reason = if routing.blinded_failure().is_some() {
+ HTLCFailReason::reason(INVALID_ONION_BLINDING, vec![0; 32])
+ } else if (error_code & 0x1000) != 0 {
+ let (real_code, error_data) = self.get_htlc_inbound_temp_fail_err_and_data(error_code, chan);
+ HTLCFailReason::reason(real_code, error_data)
+ } else {
+ HTLCFailReason::from_failure_code(error_code)
+ }.get_encrypted_failure_packet(incoming_shared_secret, &None);
+ let msg = msgs::UpdateFailHTLC {
+ channel_id: msg.channel_id,
+ htlc_id: msg.htlc_id,
+ reason
+ };
+ pending_forward_info = PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msg));
+ },
+ _ => {},
+ }
}
- };
- let logger = WithChannelContext::from(&self.logger, &chan.context);
- try_chan_phase_entry!(self, chan.update_add_htlc(&msg, pending_forward_info, create_pending_htlc_status, &self.fee_estimator, &&logger), chan_phase_entry);
+ }
+ try_chan_phase_entry!(self, chan.update_add_htlc(&msg, pending_forward_info), chan_phase_entry);
} else {
return try_chan_phase_entry!(self, Err(ChannelError::Close(
"Got an update_add_htlc message for an unfunded channel!".into())), chan_phase_entry);
fn internal_update_fulfill_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFulfillHTLC) -> Result<(), MsgHandleErrInternal> {
let funding_txo;
- let (htlc_source, forwarded_htlc_value) = {
+ let next_user_channel_id;
+ let (htlc_source, forwarded_htlc_value, skimmed_fee_msat) = {
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex = per_peer_state.get(counterparty_node_id)
.ok_or_else(|| {
// outbound HTLC is claimed. This is guaranteed to all complete before we
// process the RAA as messages are processed from single peers serially.
funding_txo = chan.context.get_funding_txo().expect("We won't accept a fulfill until funded");
+ next_user_channel_id = chan.context.get_user_id();
res
} else {
return try_chan_phase_entry!(self, Err(ChannelError::Close(
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
}
};
- self.claim_funds_internal(htlc_source, msg.payment_preimage.clone(), Some(forwarded_htlc_value), false, false, Some(*counterparty_node_id), funding_txo);
+ self.claim_funds_internal(htlc_source, msg.payment_preimage.clone(),
+ Some(forwarded_htlc_value), skimmed_fee_msat, false, false, Some(*counterparty_node_id),
+ funding_txo, msg.channel_id, Some(next_user_channel_id),
+ );
+
Ok(())
}
}
}
+ fn push_decode_update_add_htlcs(&self, mut update_add_htlcs: (u64, Vec<msgs::UpdateAddHTLC>)) {
+ let mut push_forward_event = self.forward_htlcs.lock().unwrap().is_empty();
+ let mut decode_update_add_htlcs = self.decode_update_add_htlcs.lock().unwrap();
+ push_forward_event &= decode_update_add_htlcs.is_empty();
+ let scid = update_add_htlcs.0;
+ match decode_update_add_htlcs.entry(scid) {
+ hash_map::Entry::Occupied(mut e) => { e.get_mut().append(&mut update_add_htlcs.1); },
+ hash_map::Entry::Vacant(e) => { e.insert(update_add_htlcs.1); },
+ }
+ if push_forward_event { self.push_pending_forwards_ev(); }
+ }
+
+ #[inline]
+ fn forward_htlcs(&self, per_source_pending_forwards: &mut [(u64, OutPoint, ChannelId, u128, Vec<(PendingHTLCInfo, u64)>)]) {
+ let push_forward_event = self.forward_htlcs_without_forward_event(per_source_pending_forwards);
+ if push_forward_event { self.push_pending_forwards_ev() }
+ }
+
#[inline]
- fn forward_htlcs(&self, per_source_pending_forwards: &mut [(u64, OutPoint, u128, Vec<(PendingHTLCInfo, u64)>)]) {
- for &mut (prev_short_channel_id, prev_funding_outpoint, prev_user_channel_id, ref mut pending_forwards) in per_source_pending_forwards {
- let mut push_forward_event = false;
+ fn forward_htlcs_without_forward_event(&self, per_source_pending_forwards: &mut [(u64, OutPoint, ChannelId, u128, Vec<(PendingHTLCInfo, u64)>)]) -> bool {
+ let mut push_forward_event = false;
+ for &mut (prev_short_channel_id, prev_funding_outpoint, prev_channel_id, prev_user_channel_id, ref mut pending_forwards) in per_source_pending_forwards {
let mut new_intercept_events = VecDeque::new();
let mut failed_intercept_forwards = Vec::new();
if !pending_forwards.is_empty() {
// Pull this now to avoid introducing a lock order with `forward_htlcs`.
let is_our_scid = self.short_to_chan_info.read().unwrap().contains_key(&scid);
+ let decode_update_add_htlcs_empty = self.decode_update_add_htlcs.lock().unwrap().is_empty();
let mut forward_htlcs = self.forward_htlcs.lock().unwrap();
let forward_htlcs_empty = forward_htlcs.is_empty();
match forward_htlcs.entry(scid) {
hash_map::Entry::Occupied(mut entry) => {
entry.get_mut().push(HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
- prev_short_channel_id, prev_funding_outpoint, prev_htlc_id, prev_user_channel_id, forward_info }));
+ prev_short_channel_id, prev_funding_outpoint, prev_channel_id, prev_htlc_id, prev_user_channel_id, forward_info }));
},
hash_map::Entry::Vacant(entry) => {
if !is_our_scid && forward_info.incoming_amt_msat.is_some() &&
intercept_id
}, None));
entry.insert(PendingAddHTLCInfo {
- prev_short_channel_id, prev_funding_outpoint, prev_htlc_id, prev_user_channel_id, forward_info });
+ prev_short_channel_id, prev_funding_outpoint, prev_channel_id, prev_htlc_id, prev_user_channel_id, forward_info });
},
hash_map::Entry::Occupied(_) => {
- let logger = WithContext::from(&self.logger, None, Some(prev_funding_outpoint.to_channel_id()));
+ let logger = WithContext::from(&self.logger, None, Some(prev_channel_id));
log_info!(logger, "Failed to forward incoming HTLC: detected duplicate intercepted payment over short channel id {}", scid);
let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
short_channel_id: prev_short_channel_id,
user_channel_id: Some(prev_user_channel_id),
outpoint: prev_funding_outpoint,
+ channel_id: prev_channel_id,
htlc_id: prev_htlc_id,
incoming_packet_shared_secret: forward_info.incoming_shared_secret,
phantom_shared_secret: None,
} else {
// We don't want to generate a PendingHTLCsForwardable event if only intercepted
// payments are being processed.
- if forward_htlcs_empty {
- push_forward_event = true;
- }
+ push_forward_event |= forward_htlcs_empty && decode_update_add_htlcs_empty;
entry.insert(vec!(HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
- prev_short_channel_id, prev_funding_outpoint, prev_htlc_id, prev_user_channel_id, forward_info })));
+ prev_short_channel_id, prev_funding_outpoint, prev_channel_id, prev_htlc_id, prev_user_channel_id, forward_info })));
}
}
}
}
for (htlc_source, payment_hash, failure_reason, destination) in failed_intercept_forwards.drain(..) {
- self.fail_htlc_backwards_internal(&htlc_source, &payment_hash, &failure_reason, destination);
+ push_forward_event |= self.fail_htlc_backwards_internal_without_forward_event(&htlc_source, &payment_hash, &failure_reason, destination);
}
if !new_intercept_events.is_empty() {
let mut events = self.pending_events.lock().unwrap();
events.append(&mut new_intercept_events);
}
- if push_forward_event { self.push_pending_forwards_ev() }
}
+ push_forward_event
}
fn push_pending_forwards_ev(&self) {
/// the [`ChannelMonitorUpdate`] in question.
fn raa_monitor_updates_held(&self,
actions_blocking_raa_monitor_updates: &BTreeMap<ChannelId, Vec<RAAMonitorUpdateBlockingAction>>,
- channel_funding_outpoint: OutPoint, counterparty_node_id: PublicKey
+ channel_funding_outpoint: OutPoint, channel_id: ChannelId, counterparty_node_id: PublicKey
) -> bool {
actions_blocking_raa_monitor_updates
- .get(&channel_funding_outpoint.to_channel_id()).map(|v| !v.is_empty()).unwrap_or(false)
+ .get(&channel_id).map(|v| !v.is_empty()).unwrap_or(false)
|| self.pending_events.lock().unwrap().iter().any(|(_, action)| {
action == &Some(EventCompletionAction::ReleaseRAAChannelMonitorUpdate {
channel_funding_outpoint,
+ channel_id,
counterparty_node_id,
})
})
if let Some(chan) = peer_state.channel_by_id.get(&channel_id) {
return self.raa_monitor_updates_held(&peer_state.actions_blocking_raa_monitor_updates,
- chan.context().get_funding_txo().unwrap(), counterparty_node_id);
+ chan.context().get_funding_txo().unwrap(), channel_id, counterparty_node_id);
}
}
false
let funding_txo_opt = chan.context.get_funding_txo();
let mon_update_blocked = if let Some(funding_txo) = funding_txo_opt {
self.raa_monitor_updates_held(
- &peer_state.actions_blocking_raa_monitor_updates, funding_txo,
+ &peer_state.actions_blocking_raa_monitor_updates, funding_txo, msg.channel_id,
*counterparty_node_id)
} else { false };
let (htlcs_to_fail, monitor_update_opt) = try_chan_phase_entry!(self,
peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelAnnouncement {
msg: try_chan_phase_entry!(self, chan.announcement_signatures(
- &self.node_signer, self.chain_hash, self.best_block.read().unwrap().height(),
+ &self.node_signer, self.chain_hash, self.best_block.read().unwrap().height,
msg, &self.default_configuration
), chan_phase_entry),
// Note that announcement_signatures fails if the channel cannot be announced,
}
fn internal_channel_reestablish(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReestablish) -> Result<NotifyOption, MsgHandleErrInternal> {
- let htlc_forwards;
let need_lnd_workaround = {
let per_peer_state = self.per_peer_state.read().unwrap();
}
}
let need_lnd_workaround = chan.context.workaround_lnd_bug_4006.take();
- htlc_forwards = self.handle_channel_resumption(
+ let (htlc_forwards, decode_update_add_htlcs) = self.handle_channel_resumption(
&mut peer_state.pending_msg_events, chan, responses.raa, responses.commitment_update, responses.order,
- Vec::new(), None, responses.channel_ready, responses.announcement_sigs);
+ Vec::new(), Vec::new(), None, responses.channel_ready, responses.announcement_sigs);
+ debug_assert!(htlc_forwards.is_none());
+ debug_assert!(decode_update_add_htlcs.is_none());
if let Some(upd) = channel_update {
peer_state.pending_msg_events.push(upd);
}
}
};
- let mut persist = NotifyOption::SkipPersistHandleEvents;
- if let Some(forwards) = htlc_forwards {
- self.forward_htlcs(&mut [forwards][..]);
- persist = NotifyOption::DoPersist;
- }
-
if let Some(channel_ready_msg) = need_lnd_workaround {
self.internal_channel_ready(counterparty_node_id, &channel_ready_msg)?;
}
- Ok(persist)
+ Ok(NotifyOption::SkipPersistHandleEvents)
}
/// Process pending events from the [`chain::Watch`], returning whether any events were processed.
let mut failed_channels = Vec::new();
let mut pending_monitor_events = self.chain_monitor.release_pending_monitor_events();
let has_pending_monitor_events = !pending_monitor_events.is_empty();
- for (funding_outpoint, mut monitor_events, counterparty_node_id) in pending_monitor_events.drain(..) {
+ for (funding_outpoint, channel_id, mut monitor_events, counterparty_node_id) in pending_monitor_events.drain(..) {
for monitor_event in monitor_events.drain(..) {
match monitor_event {
MonitorEvent::HTLCEvent(htlc_update) => {
- let logger = WithContext::from(&self.logger, counterparty_node_id, Some(funding_outpoint.to_channel_id()));
+ let logger = WithContext::from(&self.logger, counterparty_node_id, Some(channel_id));
if let Some(preimage) = htlc_update.payment_preimage {
log_trace!(logger, "Claiming HTLC with preimage {} from our monitor", preimage);
- self.claim_funds_internal(htlc_update.source, preimage, htlc_update.htlc_value_satoshis.map(|v| v * 1000), true, false, counterparty_node_id, funding_outpoint);
+ self.claim_funds_internal(htlc_update.source, preimage,
+ htlc_update.htlc_value_satoshis.map(|v| v * 1000), None, true,
+ false, counterparty_node_id, funding_outpoint, channel_id, None);
} else {
log_trace!(logger, "Failing HTLC with hash {} from our monitor", &htlc_update.payment_hash);
- let receiver = HTLCDestination::NextHopChannel { node_id: counterparty_node_id, channel_id: funding_outpoint.to_channel_id() };
+ let receiver = HTLCDestination::NextHopChannel { node_id: counterparty_node_id, channel_id };
let reason = HTLCFailReason::from_failure_code(0x4000 | 8);
self.fail_htlc_backwards_internal(&htlc_update.source, &htlc_update.payment_hash, &reason, receiver);
}
},
- MonitorEvent::HolderForceClosed(funding_outpoint) => {
+ MonitorEvent::HolderForceClosed(_) | MonitorEvent::HolderForceClosedWithInfo { .. } => {
let counterparty_node_id_opt = match counterparty_node_id {
Some(cp_id) => Some(cp_id),
None => {
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
let pending_msg_events = &mut peer_state.pending_msg_events;
- if let hash_map::Entry::Occupied(chan_phase_entry) = peer_state.channel_by_id.entry(funding_outpoint.to_channel_id()) {
+ if let hash_map::Entry::Occupied(chan_phase_entry) = peer_state.channel_by_id.entry(channel_id) {
if let ChannelPhase::Funded(mut chan) = remove_channel_phase!(self, chan_phase_entry) {
- failed_channels.push(chan.context.force_shutdown(false, ClosureReason::HolderForceClosed));
+ let reason = if let MonitorEvent::HolderForceClosedWithInfo { reason, .. } = monitor_event {
+ reason
+ } else {
+ ClosureReason::HolderForceClosed
+ };
+ failed_channels.push(chan.context.force_shutdown(false, reason.clone()));
if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
- pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+ let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap();
+ pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate {
msg: update
});
}
pending_msg_events.push(events::MessageSendEvent::HandleError {
node_id: chan.context.get_counterparty_node_id(),
action: msgs::ErrorAction::DisconnectPeer {
- msg: Some(msgs::ErrorMessage { channel_id: chan.context.channel_id(), data: "Channel force-closed".to_owned() })
+ msg: Some(msgs::ErrorMessage { channel_id: chan.context.channel_id(), data: reason.to_string() })
},
});
}
}
}
},
- MonitorEvent::Completed { funding_txo, monitor_update_id } => {
- self.channel_monitor_updated(&funding_txo, monitor_update_id, counterparty_node_id.as_ref());
+ MonitorEvent::Completed { funding_txo, channel_id, monitor_update_id } => {
+ self.channel_monitor_updated(&funding_txo, &channel_id, monitor_update_id, counterparty_node_id.as_ref());
},
}
}
// We're done with this channel. We got a closing_signed and sent back
// a closing_signed with a closing transaction to broadcast.
if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
- pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+ let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap();
+ pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate {
msg: update
});
}
// Channel::force_shutdown tries to make us do) as we may still be in initialization,
// so we track the update internally and handle it when the user next calls
// timer_tick_occurred, guaranteeing we're running normally.
- if let Some((counterparty_node_id, funding_txo, update)) = failure.monitor_update.take() {
+ if let Some((counterparty_node_id, funding_txo, channel_id, update)) = failure.monitor_update.take() {
assert_eq!(update.updates.len(), 1);
if let ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast } = update.updates[0] {
assert!(should_broadcast);
} else { unreachable!(); }
self.pending_background_events.lock().unwrap().push(
BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
- counterparty_node_id, funding_txo, update
+ counterparty_node_id, funding_txo, update, channel_id,
});
}
self.finish_close_channel(failure);
}
}
+}
+macro_rules! create_offer_builder { ($self: ident, $builder: ty) => {
/// Creates an [`OfferBuilder`] such that the [`Offer`] it builds is recognized by the
/// [`ChannelManager`] when handling [`InvoiceRequest`] messages for the offer. The offer will
/// not have an expiration unless otherwise set on the builder.
/// [`Offer`]: crate::offers::offer::Offer
/// [`InvoiceRequest`]: crate::offers::invoice_request::InvoiceRequest
pub fn create_offer_builder(
- &self, description: String
- ) -> Result<OfferBuilder<DerivedMetadata, secp256k1::All>, Bolt12SemanticError> {
- let node_id = self.get_our_node_id();
- let expanded_key = &self.inbound_payment_key;
- let entropy = &*self.entropy_source;
- let secp_ctx = &self.secp_ctx;
-
- let path = self.create_blinded_path().map_err(|_| Bolt12SemanticError::MissingPaths)?;
+ &$self, description: String
+ ) -> Result<$builder, Bolt12SemanticError> {
+ let node_id = $self.get_our_node_id();
+ let expanded_key = &$self.inbound_payment_key;
+ let entropy = &*$self.entropy_source;
+ let secp_ctx = &$self.secp_ctx;
+
+ let path = $self.create_blinded_path().map_err(|_| Bolt12SemanticError::MissingPaths)?;
let builder = OfferBuilder::deriving_signing_pubkey(
description, node_id, expanded_key, entropy, secp_ctx
)
- .chain_hash(self.chain_hash)
+ .chain_hash($self.chain_hash)
.path(path);
- Ok(builder)
+ Ok(builder.into())
}
+} }
+macro_rules! create_refund_builder { ($self: ident, $builder: ty) => {
/// Creates a [`RefundBuilder`] such that the [`Refund`] it builds is recognized by the
/// [`ChannelManager`] when handling [`Bolt12Invoice`] messages for the refund.
///
/// [`Bolt12Invoice::payment_paths`]: crate::offers::invoice::Bolt12Invoice::payment_paths
/// [Avoiding Duplicate Payments]: #avoiding-duplicate-payments
pub fn create_refund_builder(
- &self, description: String, amount_msats: u64, absolute_expiry: Duration,
+ &$self, description: String, amount_msats: u64, absolute_expiry: Duration,
payment_id: PaymentId, retry_strategy: Retry, max_total_routing_fee_msat: Option<u64>
- ) -> Result<RefundBuilder<secp256k1::All>, Bolt12SemanticError> {
- let node_id = self.get_our_node_id();
- let expanded_key = &self.inbound_payment_key;
- let entropy = &*self.entropy_source;
- let secp_ctx = &self.secp_ctx;
+ ) -> Result<$builder, Bolt12SemanticError> {
+ let node_id = $self.get_our_node_id();
+ let expanded_key = &$self.inbound_payment_key;
+ let entropy = &*$self.entropy_source;
+ let secp_ctx = &$self.secp_ctx;
- let path = self.create_blinded_path().map_err(|_| Bolt12SemanticError::MissingPaths)?;
+ let path = $self.create_blinded_path().map_err(|_| Bolt12SemanticError::MissingPaths)?;
let builder = RefundBuilder::deriving_payer_id(
description, node_id, expanded_key, entropy, secp_ctx, amount_msats, payment_id
)?
- .chain_hash(self.chain_hash)
+ .chain_hash($self.chain_hash)
.absolute_expiry(absolute_expiry)
.path(path);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop($self);
+
let expiration = StaleExpiration::AbsoluteTimeout(absolute_expiry);
- self.pending_outbound_payments
+ $self.pending_outbound_payments
.add_new_awaiting_invoice(
payment_id, expiration, retry_strategy, max_total_routing_fee_msat,
)
.map_err(|_| Bolt12SemanticError::DuplicatePaymentId)?;
- Ok(builder)
+ Ok(builder.into())
}
+} }
+
+impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref> ChannelManager<M, T, ES, NS, SP, F, R, L>
+where
+ M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
+ T::Target: BroadcasterInterface,
+ ES::Target: EntropySource,
+ NS::Target: NodeSigner,
+ SP::Target: SignerProvider,
+ F::Target: FeeEstimator,
+ R::Target: Router,
+ L::Target: Logger,
+{
+ #[cfg(not(c_bindings))]
+ create_offer_builder!(self, OfferBuilder<DerivedMetadata, secp256k1::All>);
+ #[cfg(not(c_bindings))]
+ create_refund_builder!(self, RefundBuilder<secp256k1::All>);
+
+ #[cfg(c_bindings)]
+ create_offer_builder!(self, OfferWithDerivedMetadataBuilder);
+ #[cfg(c_bindings)]
+ create_refund_builder!(self, RefundMaybeWithDerivedMetadataBuilder);
/// Pays for an [`Offer`] using the given parameters by creating an [`InvoiceRequest`] and
/// enqueuing it to be sent via an onion message. [`ChannelManager`] will pay the actual
/// Errors if:
/// - a duplicate `payment_id` is provided given the caveats in the aforementioned link,
/// - the provided parameters are invalid for the offer,
+ /// - the offer is for an unsupported chain, or
/// - the parameterized [`Router`] is unable to create a blinded reply path for the invoice
/// request.
///
let entropy = &*self.entropy_source;
let secp_ctx = &self.secp_ctx;
- let builder = offer
+ let builder: InvoiceRequestBuilder<DerivedPayerId, secp256k1::All> = offer
.request_invoice_deriving_payer_id(expanded_key, entropy, secp_ctx, payment_id)?
- .chain_hash(self.chain_hash)?;
+ .into();
+ let builder = builder.chain_hash(self.chain_hash)?;
+
let builder = match quantity {
None => builder,
Some(quantity) => builder.quantity(quantity)?,
let invoice_request = builder.build_and_sign()?;
let reply_path = self.create_blinded_path().map_err(|_| Bolt12SemanticError::MissingPaths)?;
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
+
let expiration = StaleExpiration::TimerTicks(1);
self.pending_outbound_payments
.add_new_awaiting_invoice(
///
/// The resulting invoice uses a [`PaymentHash`] recognized by the [`ChannelManager`] and a
/// [`BlindedPath`] containing the [`PaymentSecret`] needed to reconstruct the corresponding
- /// [`PaymentPreimage`].
+ /// [`PaymentPreimage`]. It is returned purely for informational purposes.
///
/// # Limitations
///
///
/// # Errors
///
- /// Errors if the parameterized [`Router`] is unable to create a blinded payment path or reply
- /// path for the invoice.
+ /// Errors if:
+ /// - the refund is for an unsupported chain, or
+ /// - the parameterized [`Router`] is unable to create a blinded payment path or reply path for
+ /// the invoice.
///
/// [`Bolt12Invoice`]: crate::offers::invoice::Bolt12Invoice
- pub fn request_refund_payment(&self, refund: &Refund) -> Result<(), Bolt12SemanticError> {
+ pub fn request_refund_payment(
+ &self, refund: &Refund
+ ) -> Result<Bolt12Invoice, Bolt12SemanticError> {
let expanded_key = &self.inbound_payment_key;
let entropy = &*self.entropy_source;
let secp_ctx = &self.secp_ctx;
let amount_msats = refund.amount_msats();
let relative_expiry = DEFAULT_RELATIVE_EXPIRY.as_secs() as u32;
+ if refund.chain() != self.chain_hash {
+ return Err(Bolt12SemanticError::UnsupportedChain);
+ }
+
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
+
match self.create_inbound_payment(Some(amount_msats), relative_expiry, None) {
Ok((payment_hash, payment_secret)) => {
- let payment_paths = self.create_blinded_payment_paths(amount_msats, payment_secret)
+ let payment_context = PaymentContext::Bolt12Refund(Bolt12RefundContext {});
+ let payment_paths = self.create_blinded_payment_paths(
+ amount_msats, payment_secret, payment_context
+ )
.map_err(|_| Bolt12SemanticError::MissingPaths)?;
- #[cfg(not(feature = "no-std"))]
+ #[cfg(feature = "std")]
let builder = refund.respond_using_derived_keys(
payment_paths, payment_hash, expanded_key, entropy
)?;
- #[cfg(feature = "no-std")]
+ #[cfg(not(feature = "std"))]
let created_at = Duration::from_secs(
self.highest_seen_timestamp.load(Ordering::Acquire) as u64
);
- #[cfg(feature = "no-std")]
+ #[cfg(not(feature = "std"))]
let builder = refund.respond_using_derived_keys_no_std(
payment_paths, payment_hash, created_at, expanded_key, entropy
)?;
+ let builder: InvoiceBuilder<DerivedSigningPubkey> = builder.into();
let invoice = builder.allow_mpp().build_and_sign(secp_ctx)?;
let reply_path = self.create_blinded_path()
.map_err(|_| Bolt12SemanticError::MissingPaths)?;
let mut pending_offers_messages = self.pending_offers_messages.lock().unwrap();
if refund.paths().is_empty() {
let message = new_pending_onion_message(
- OffersMessage::Invoice(invoice),
+ OffersMessage::Invoice(invoice.clone()),
Destination::Node(refund.payer_id()),
Some(reply_path),
);
}
}
- Ok(())
+ Ok(invoice)
},
Err(()) => Err(Bolt12SemanticError::InvalidAmount),
}
/// This differs from [`create_inbound_payment_for_hash`] only in that it generates the
/// [`PaymentHash`] and [`PaymentPreimage`] for you.
///
- /// The [`PaymentPreimage`] will ultimately be returned to you in the [`PaymentClaimable`], which
- /// will have the [`PaymentClaimable::purpose`] be [`PaymentPurpose::InvoicePayment`] with
- /// its [`PaymentPurpose::InvoicePayment::payment_preimage`] field filled in. That should then be
- /// passed directly to [`claim_funds`].
+ /// The [`PaymentPreimage`] will ultimately be returned to you in the [`PaymentClaimable`] event, which
+ /// will have the [`PaymentClaimable::purpose`] return `Some` for [`PaymentPurpose::preimage`]. That
+ /// should then be passed directly to [`claim_funds`].
///
/// See [`create_inbound_payment_for_hash`] for detailed documentation on behavior and requirements.
///
/// [`claim_funds`]: Self::claim_funds
/// [`PaymentClaimable`]: events::Event::PaymentClaimable
/// [`PaymentClaimable::purpose`]: events::Event::PaymentClaimable::purpose
- /// [`PaymentPurpose::InvoicePayment`]: events::PaymentPurpose::InvoicePayment
- /// [`PaymentPurpose::InvoicePayment::payment_preimage`]: events::PaymentPurpose::InvoicePayment::payment_preimage
+ /// [`PaymentPurpose::preimage`]: events::PaymentPurpose::preimage
/// [`create_inbound_payment_for_hash`]: Self::create_inbound_payment_for_hash
pub fn create_inbound_payment(&self, min_value_msat: Option<u64>, invoice_expiry_delta_secs: u32,
min_final_cltv_expiry_delta: Option<u16>) -> Result<(PaymentHash, PaymentSecret), ()> {
/// Errors if the `MessageRouter` errors or returns an empty `Vec`.
fn create_blinded_path(&self) -> Result<BlindedPath, ()> {
let recipient = self.get_our_node_id();
- let entropy_source = self.entropy_source.deref();
let secp_ctx = &self.secp_ctx;
let peers = self.per_peer_state.read().unwrap()
.collect::<Vec<_>>();
self.router
- .create_blinded_paths(recipient, peers, entropy_source, secp_ctx)
+ .create_blinded_paths(recipient, peers, secp_ctx)
.and_then(|paths| paths.into_iter().next().ok_or(()))
}
/// Creates multi-hop blinded payment paths for the given `amount_msats` by delegating to
/// [`Router::create_blinded_payment_paths`].
fn create_blinded_payment_paths(
- &self, amount_msats: u64, payment_secret: PaymentSecret
+ &self, amount_msats: u64, payment_secret: PaymentSecret, payment_context: PaymentContext
) -> Result<Vec<(BlindedPayInfo, BlindedPath)>, ()> {
- let entropy_source = self.entropy_source.deref();
let secp_ctx = &self.secp_ctx;
let first_hops = self.list_usable_channels();
let payee_node_id = self.get_our_node_id();
- let max_cltv_expiry = self.best_block.read().unwrap().height() + CLTV_FAR_FAR_AWAY
+ let max_cltv_expiry = self.best_block.read().unwrap().height + CLTV_FAR_FAR_AWAY
+ LATENCY_GRACE_PERIOD_BLOCKS;
let payee_tlvs = ReceiveTlvs {
payment_secret,
max_cltv_expiry,
htlc_minimum_msat: 1,
},
+ payment_context,
};
self.router.create_blinded_payment_paths(
- payee_node_id, first_hops, payee_tlvs, amount_msats, entropy_source, secp_ctx
+ payee_node_id, first_hops, payee_tlvs, amount_msats, secp_ctx
)
}
///
/// [phantom node payments]: crate::sign::PhantomKeysManager
pub fn get_phantom_scid(&self) -> u64 {
- let best_block_height = self.best_block.read().unwrap().height();
+ let best_block_height = self.best_block.read().unwrap().height;
let short_to_chan_info = self.short_to_chan_info.read().unwrap();
loop {
let scid_candidate = fake_scid::Namespace::Phantom.get_fake_scid(best_block_height, &self.chain_hash, &self.fake_scid_rand_bytes, &self.entropy_source);
/// Note that this method is not guaranteed to return unique values, you may need to call it a few
/// times to get a unique scid.
pub fn get_intercept_scid(&self) -> u64 {
- let best_block_height = self.best_block.read().unwrap().height();
+ let best_block_height = self.best_block.read().unwrap().height;
let short_to_chan_info = self.short_to_chan_info.read().unwrap();
loop {
let scid_candidate = fake_scid::Namespace::Intercept.get_fake_scid(best_block_height, &self.chain_hash, &self.fake_scid_rand_bytes, &self.entropy_source);
/// [`Event`] being handled) completes, this should be called to restore the channel to normal
/// operation. It will double-check that nothing *else* is also blocking the same channel from
/// making progress and then let any blocked [`ChannelMonitorUpdate`]s fly.
- fn handle_monitor_update_release(&self, counterparty_node_id: PublicKey, channel_funding_outpoint: OutPoint, mut completed_blocker: Option<RAAMonitorUpdateBlockingAction>) {
+ fn handle_monitor_update_release(&self, counterparty_node_id: PublicKey,
+ channel_funding_outpoint: OutPoint, channel_id: ChannelId,
+ mut completed_blocker: Option<RAAMonitorUpdateBlockingAction>) {
+
let logger = WithContext::from(
- &self.logger, Some(counterparty_node_id), Some(channel_funding_outpoint.to_channel_id())
+ &self.logger, Some(counterparty_node_id), Some(channel_id),
);
loop {
let per_peer_state = self.per_peer_state.read().unwrap();
if let Some(blocker) = completed_blocker.take() {
// Only do this on the first iteration of the loop.
if let Some(blockers) = peer_state.actions_blocking_raa_monitor_updates
- .get_mut(&channel_funding_outpoint.to_channel_id())
+ .get_mut(&channel_id)
{
blockers.retain(|iter| iter != &blocker);
}
}
if self.raa_monitor_updates_held(&peer_state.actions_blocking_raa_monitor_updates,
- channel_funding_outpoint, counterparty_node_id) {
+ channel_funding_outpoint, channel_id, counterparty_node_id) {
// Check that, while holding the peer lock, we don't have anything else
// blocking monitor updates for this channel. If we do, release the monitor
// update(s) when those blockers complete.
log_trace!(logger, "Delaying monitor unlock for channel {} as another channel's mon update needs to complete first",
- &channel_funding_outpoint.to_channel_id());
+ &channel_id);
break;
}
- if let hash_map::Entry::Occupied(mut chan_phase_entry) = peer_state.channel_by_id.entry(channel_funding_outpoint.to_channel_id()) {
+ if let hash_map::Entry::Occupied(mut chan_phase_entry) = peer_state.channel_by_id.entry(
+ channel_id) {
if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
debug_assert_eq!(chan.context.get_funding_txo().unwrap(), channel_funding_outpoint);
if let Some((monitor_update, further_update_exists)) = chan.unblock_next_blocked_monitor_update() {
log_debug!(logger, "Unlocking monitor updating for channel {} and updating monitor",
- channel_funding_outpoint.to_channel_id());
+ channel_id);
handle_new_monitor_update!(self, channel_funding_outpoint, monitor_update,
peer_state_lck, peer_state, per_peer_state, chan);
if further_update_exists {
}
} else {
log_trace!(logger, "Unlocked monitor updating for channel {} without monitors to update",
- channel_funding_outpoint.to_channel_id());
+ channel_id);
}
}
}
for action in actions {
match action {
EventCompletionAction::ReleaseRAAChannelMonitorUpdate {
- channel_funding_outpoint, counterparty_node_id
+ channel_funding_outpoint, channel_id, counterparty_node_id
} => {
- self.handle_monitor_update_release(counterparty_node_id, channel_funding_outpoint, None);
+ self.handle_monitor_update_release(counterparty_node_id, channel_funding_outpoint, channel_id, None);
}
}
}
/// will randomly be placed first or last in the returned array.
///
/// Note that even though `BroadcastChannelAnnouncement` and `BroadcastChannelUpdate`
- /// `MessageSendEvent`s are intended to be broadcasted to all peers, they will be pleaced among
+ /// `MessageSendEvent`s are intended to be broadcasted to all peers, they will be placed among
/// the `MessageSendEvent`s to the specific peer they were generated under.
fn get_and_clear_pending_msg_events(&self) -> Vec<MessageSendEvent> {
let events = RefCell::new(Vec::new());
result = NotifyOption::DoPersist;
}
+ let mut is_any_peer_connected = false;
let mut pending_events = Vec::new();
let per_peer_state = self.per_peer_state.read().unwrap();
for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
if peer_state.pending_msg_events.len() > 0 {
pending_events.append(&mut peer_state.pending_msg_events);
}
+ if peer_state.is_connected {
+ is_any_peer_connected = true
+ }
+ }
+
+ // Ensure that we are connected to some peers before getting broadcast messages.
+ if is_any_peer_connected {
+ let mut broadcast_msgs = self.pending_broadcast_messages.lock().unwrap();
+ pending_events.append(&mut broadcast_msgs);
}
if !pending_events.is_empty() {
fn filtered_block_connected(&self, header: &Header, txdata: &TransactionData, height: u32) {
{
let best_block = self.best_block.read().unwrap();
- assert_eq!(best_block.block_hash(), header.prev_blockhash,
+ assert_eq!(best_block.block_hash, header.prev_blockhash,
"Blocks must be connected in chain-order - the connected header must build on the last connected header");
- assert_eq!(best_block.height(), height - 1,
+ assert_eq!(best_block.height, height - 1,
"Blocks must be connected in chain-order - the connected block height must be one greater than the previous height");
}
let new_height = height - 1;
{
let mut best_block = self.best_block.write().unwrap();
- assert_eq!(best_block.block_hash(), header.block_hash(),
+ assert_eq!(best_block.block_hash, header.block_hash(),
"Blocks must be disconnected in chain-order - the disconnected header must be the last connected header");
- assert_eq!(best_block.height(), height,
+ assert_eq!(best_block.height, height,
"Blocks must be disconnected in chain-order - the disconnected block must have the correct height");
*best_block = BestBlock::new(header.prev_blockhash, new_height)
}
self.do_chain_event(Some(height), |channel| channel.transactions_confirmed(&block_hash, height, txdata, self.chain_hash, &self.node_signer, &self.default_configuration, &&WithChannelContext::from(&self.logger, &channel.context))
.map(|(a, b)| (a, Vec::new(), b)));
- let last_best_block_height = self.best_block.read().unwrap().height();
+ let last_best_block_height = self.best_block.read().unwrap().height;
if height < last_best_block_height {
let timestamp = self.highest_seen_timestamp.load(Ordering::Acquire);
self.do_chain_event(Some(last_best_block_height), |channel| channel.best_block_updated(last_best_block_height, timestamp as u32, self.chain_hash, &self.node_signer, &self.default_configuration, &&WithChannelContext::from(&self.logger, &channel.context)));
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
let pending_msg_events = &mut peer_state.pending_msg_events;
+
peer_state.channel_by_id.retain(|_, phase| {
match phase {
// Retain unfunded channels.
ChannelPhase::UnfundedOutboundV1(_) | ChannelPhase::UnfundedInboundV1(_) => true,
+ // TODO(dual_funding): Combine this match arm with above.
+ #[cfg(any(dual_funding, splicing))]
+ ChannelPhase::UnfundedOutboundV2(_) | ChannelPhase::UnfundedInboundV2(_) => true,
ChannelPhase::Funded(channel) => {
let res = f(channel);
if let Ok((channel_ready_opt, mut timed_out_pending_htlcs, announcement_sigs)) = res {
let reason_message = format!("{}", reason);
failed_channels.push(channel.context.force_shutdown(true, reason));
if let Ok(update) = self.get_channel_update_for_broadcast(&channel) {
- pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+ let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap();
+ pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate {
msg: update
});
}
incoming_packet_shared_secret: htlc.forward_info.incoming_shared_secret,
phantom_shared_secret: None,
outpoint: htlc.prev_funding_outpoint,
+ channel_id: htlc.prev_channel_id,
blinded_failure: htlc.forward_info.routing.blinded_failure(),
});
HTLCFailReason::from_failure_code(0x2000 | 2),
HTLCDestination::InvalidForward { requested_forward_scid }));
let logger = WithContext::from(
- &self.logger, None, Some(htlc.prev_funding_outpoint.to_channel_id())
+ &self.logger, None, Some(htlc.prev_channel_id)
);
log_trace!(logger, "Timing out intercepted HTLC with requested forward scid {}", requested_forward_scid);
false
}
/// Returns true if this [`ChannelManager`] needs to be persisted.
+ ///
+ /// See [`Self::get_event_or_persistence_needed_future`] for retrieving a [`Future`] that
+ /// indicates this should be checked.
pub fn get_and_clear_needs_persistence(&self) -> bool {
self.needs_persist_flag.swap(false, Ordering::AcqRel)
}
fn handle_open_channel_v2(&self, counterparty_node_id: &PublicKey, msg: &msgs::OpenChannelV2) {
let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
"Dual-funded channels not supported".to_owned(),
- msg.temporary_channel_id.clone())), *counterparty_node_id);
+ msg.common_fields.temporary_channel_id.clone())), *counterparty_node_id);
}
fn handle_accept_channel(&self, counterparty_node_id: &PublicKey, msg: &msgs::AcceptChannel) {
fn handle_accept_channel_v2(&self, counterparty_node_id: &PublicKey, msg: &msgs::AcceptChannelV2) {
let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
"Dual-funded channels not supported".to_owned(),
- msg.temporary_channel_id.clone())), *counterparty_node_id);
+ msg.common_fields.temporary_channel_id.clone())), *counterparty_node_id);
}
fn handle_funding_created(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingCreated) {
msg.channel_id.clone())), *counterparty_node_id);
}
+ #[cfg(splicing)]
fn handle_splice(&self, counterparty_node_id: &PublicKey, msg: &msgs::Splice) {
let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
"Splicing not supported".to_owned(),
msg.channel_id.clone())), *counterparty_node_id);
}
+ #[cfg(splicing)]
fn handle_splice_ack(&self, counterparty_node_id: &PublicKey, msg: &msgs::SpliceAck) {
let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
"Splicing not supported (splice_ack)".to_owned(),
msg.channel_id.clone())), *counterparty_node_id);
}
+ #[cfg(splicing)]
fn handle_splice_locked(&self, counterparty_node_id: &PublicKey, msg: &msgs::SpliceLocked) {
let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
"Splicing not supported (splice_locked)".to_owned(),
}
&mut chan.context
},
- // Unfunded channels will always be removed.
- ChannelPhase::UnfundedOutboundV1(chan) => {
- &mut chan.context
+ // We retain UnfundedOutboundV1 channel for some time in case
+ // peer unexpectedly disconnects, and intends to reconnect again.
+ ChannelPhase::UnfundedOutboundV1(_) => {
+ return true;
},
+ // Unfunded inbound channels will always be removed.
ChannelPhase::UnfundedInboundV1(chan) => {
&mut chan.context
},
+ #[cfg(any(dual_funding, splicing))]
+ ChannelPhase::UnfundedOutboundV2(chan) => {
+ &mut chan.context
+ },
+ #[cfg(any(dual_funding, splicing))]
+ ChannelPhase::UnfundedInboundV2(chan) => {
+ &mut chan.context
+ },
};
// Clean up for removal.
update_maps_on_chan_removal!(self, &context);
// Gossip
&events::MessageSendEvent::SendChannelAnnouncement { .. } => false,
&events::MessageSendEvent::BroadcastChannelAnnouncement { .. } => true,
- &events::MessageSendEvent::BroadcastChannelUpdate { .. } => true,
+ // [`ChannelManager::pending_broadcast_messages`] holds the [`BroadcastChannelUpdate`]
+ // This check here is to ensure exhaustivity.
+ &events::MessageSendEvent::BroadcastChannelUpdate { .. } => {
+ debug_assert!(false, "This event shouldn't have been here");
+ false
+ },
&events::MessageSendEvent::BroadcastNodeAnnouncement { .. } => true,
&events::MessageSendEvent::SendChannelUpdate { .. } => false,
&events::MessageSendEvent::SendChannelRangeQuery { .. } => false,
return NotifyOption::SkipPersistNoEvents;
}
e.insert(Mutex::new(PeerState {
- channel_by_id: HashMap::new(),
- inbound_channel_request_by_id: HashMap::new(),
+ channel_by_id: new_hash_map(),
+ inbound_channel_request_by_id: new_hash_map(),
latest_features: init_msg.features.clone(),
pending_msg_events: Vec::new(),
in_flight_monitor_updates: BTreeMap::new(),
let mut peer_state = e.get().lock().unwrap();
peer_state.latest_features = init_msg.features.clone();
- let best_block_height = self.best_block.read().unwrap().height();
+ let best_block_height = self.best_block.read().unwrap().height;
if inbound_peer_limited &&
Self::unfunded_channel_count(&*peer_state, best_block_height) ==
peer_state.channel_by_id.len()
let peer_state = &mut *peer_state_lock;
let pending_msg_events = &mut peer_state.pending_msg_events;
- peer_state.channel_by_id.iter_mut().filter_map(|(_, phase)|
- if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
- ).for_each(|chan| {
- let logger = WithChannelContext::from(&self.logger, &chan.context);
- pending_msg_events.push(events::MessageSendEvent::SendChannelReestablish {
- node_id: chan.context.get_counterparty_node_id(),
- msg: chan.get_channel_reestablish(&&logger),
- });
- });
+ for (_, phase) in peer_state.channel_by_id.iter_mut() {
+ match phase {
+ ChannelPhase::Funded(chan) => {
+ let logger = WithChannelContext::from(&self.logger, &chan.context);
+ pending_msg_events.push(events::MessageSendEvent::SendChannelReestablish {
+ node_id: chan.context.get_counterparty_node_id(),
+ msg: chan.get_channel_reestablish(&&logger),
+ });
+ }
+
+ ChannelPhase::UnfundedOutboundV1(chan) => {
+ pending_msg_events.push(events::MessageSendEvent::SendOpenChannel {
+ node_id: chan.context.get_counterparty_node_id(),
+ msg: chan.get_open_channel(self.chain_hash),
+ });
+ }
+
+ // TODO(dual_funding): Combine this match arm with above once #[cfg(any(dual_funding, splicing))] is removed.
+ #[cfg(any(dual_funding, splicing))]
+ ChannelPhase::UnfundedOutboundV2(chan) => {
+ pending_msg_events.push(events::MessageSendEvent::SendOpenChannelV2 {
+ node_id: chan.context.get_counterparty_node_id(),
+ msg: chan.get_open_channel_v2(self.chain_hash),
+ });
+ },
+
+ ChannelPhase::UnfundedInboundV1(_) => {
+ // Since unfunded inbound channel maps are cleared upon disconnecting a peer,
+ // they are not persisted and won't be recovered after a crash.
+ // Therefore, they shouldn't exist at this point.
+ debug_assert!(false);
+ }
+
+ // TODO(dual_funding): Combine this match arm with above once #[cfg(any(dual_funding, splicing))] is removed.
+ #[cfg(any(dual_funding, splicing))]
+ ChannelPhase::UnfundedInboundV2(channel) => {
+ // Since unfunded inbound channel maps are cleared upon disconnecting a peer,
+ // they are not persisted and won't be recovered after a crash.
+ // Therefore, they shouldn't exist at this point.
+ debug_assert!(false);
+ },
+ }
+ }
}
return NotifyOption::SkipPersistHandleEvents;
}
fn handle_error(&self, counterparty_node_id: &PublicKey, msg: &msgs::ErrorMessage) {
- let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
-
match &msg.data as &str {
"cannot co-op close channel w/ active htlcs"|
"link failed to shutdown" =>
// We're not going to bother handling this in a sensible way, instead simply
// repeating the Shutdown message on repeat until morale improves.
if !msg.channel_id.is_zero() {
- let per_peer_state = self.per_peer_state.read().unwrap();
- let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id);
- if peer_state_mutex_opt.is_none() { return; }
- let mut peer_state = peer_state_mutex_opt.unwrap().lock().unwrap();
- if let Some(ChannelPhase::Funded(chan)) = peer_state.channel_by_id.get(&msg.channel_id) {
- if let Some(msg) = chan.get_outbound_shutdown() {
- peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
- node_id: *counterparty_node_id,
- msg,
- });
- }
- peer_state.pending_msg_events.push(events::MessageSendEvent::HandleError {
- node_id: *counterparty_node_id,
- action: msgs::ErrorAction::SendWarningMessage {
- msg: msgs::WarningMessage {
- channel_id: msg.channel_id,
- data: "You appear to be exhibiting LND bug 6039, we'll keep sending you shutdown messages until you handle them correctly".to_owned()
- },
- log_level: Level::Trace,
+ PersistenceNotifierGuard::optionally_notify(
+ self,
+ || -> NotifyOption {
+ let per_peer_state = self.per_peer_state.read().unwrap();
+ let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id);
+ if peer_state_mutex_opt.is_none() { return NotifyOption::SkipPersistNoEvents; }
+ let mut peer_state = peer_state_mutex_opt.unwrap().lock().unwrap();
+ if let Some(ChannelPhase::Funded(chan)) = peer_state.channel_by_id.get(&msg.channel_id) {
+ if let Some(msg) = chan.get_outbound_shutdown() {
+ peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
+ node_id: *counterparty_node_id,
+ msg,
+ });
+ }
+ peer_state.pending_msg_events.push(events::MessageSendEvent::HandleError {
+ node_id: *counterparty_node_id,
+ action: msgs::ErrorAction::SendWarningMessage {
+ msg: msgs::WarningMessage {
+ channel_id: msg.channel_id,
+ data: "You appear to be exhibiting LND bug 6039, we'll keep sending you shutdown messages until you handle them correctly".to_owned()
+ },
+ log_level: Level::Trace,
+ }
+ });
+ // This can happen in a fairly tight loop, so we absolutely cannot trigger
+ // a `ChannelManager` write here.
+ return NotifyOption::SkipPersistHandleEvents;
}
- });
- }
+ NotifyOption::SkipPersistNoEvents
+ }
+ );
}
return;
}
_ => {}
}
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
+
if msg.channel_id.is_zero() {
let channel_ids: Vec<ChannelId> = {
let per_peer_state = self.per_peer_state.read().unwrap();
if peer_state_mutex_opt.is_none() { return; }
let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
let peer_state = &mut *peer_state_lock;
- if let Some(ChannelPhase::UnfundedOutboundV1(chan)) = peer_state.channel_by_id.get_mut(&msg.channel_id) {
- if let Ok(msg) = chan.maybe_handle_error_without_close(self.chain_hash, &self.fee_estimator) {
- peer_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannel {
- node_id: *counterparty_node_id,
- msg,
- });
- return;
- }
+ match peer_state.channel_by_id.get_mut(&msg.channel_id) {
+ Some(ChannelPhase::UnfundedOutboundV1(ref mut chan)) => {
+ if let Ok(msg) = chan.maybe_handle_error_without_close(self.chain_hash, &self.fee_estimator) {
+ peer_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannel {
+ node_id: *counterparty_node_id,
+ msg,
+ });
+ return;
+ }
+ },
+ #[cfg(any(dual_funding, splicing))]
+ Some(ChannelPhase::UnfundedOutboundV2(ref mut chan)) => {
+ if let Ok(msg) = chan.maybe_handle_error_without_close(self.chain_hash, &self.fee_estimator) {
+ peer_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannelV2 {
+ node_id: *counterparty_node_id,
+ msg,
+ });
+ return;
+ }
+ },
+ None | Some(ChannelPhase::UnfundedInboundV1(_) | ChannelPhase::Funded(_)) => (),
+ #[cfg(any(dual_funding, splicing))]
+ Some(ChannelPhase::UnfundedInboundV2(_)) => (),
}
}
},
};
+ let payment_context = PaymentContext::Bolt12Offer(Bolt12OfferContext {
+ offer_id: invoice_request.offer_id,
+ invoice_request: invoice_request.fields(),
+ });
let payment_paths = match self.create_blinded_payment_paths(
- amount_msats, payment_secret
+ amount_msats, payment_secret, payment_context
) {
Ok(payment_paths) => payment_paths,
Err(()) => {
},
};
- #[cfg(feature = "no-std")]
+ #[cfg(not(feature = "std"))]
let created_at = Duration::from_secs(
self.highest_seen_timestamp.load(Ordering::Acquire) as u64
);
- if invoice_request.keys.is_some() {
- #[cfg(not(feature = "no-std"))]
+ let response = if invoice_request.keys.is_some() {
+ #[cfg(feature = "std")]
let builder = invoice_request.respond_using_derived_keys(
payment_paths, payment_hash
);
- #[cfg(feature = "no-std")]
+ #[cfg(not(feature = "std"))]
let builder = invoice_request.respond_using_derived_keys_no_std(
payment_paths, payment_hash, created_at
);
- match builder.and_then(|b| b.allow_mpp().build_and_sign(secp_ctx)) {
- Ok(invoice) => Some(OffersMessage::Invoice(invoice)),
- Err(error) => Some(OffersMessage::InvoiceError(error.into())),
- }
+ builder
+ .map(InvoiceBuilder::<DerivedSigningPubkey>::from)
+ .and_then(|builder| builder.allow_mpp().build_and_sign(secp_ctx))
+ .map_err(InvoiceError::from)
} else {
- #[cfg(not(feature = "no-std"))]
+ #[cfg(feature = "std")]
let builder = invoice_request.respond_with(payment_paths, payment_hash);
- #[cfg(feature = "no-std")]
+ #[cfg(not(feature = "std"))]
let builder = invoice_request.respond_with_no_std(
payment_paths, payment_hash, created_at
);
- let response = builder.and_then(|builder| builder.allow_mpp().build())
- .map_err(|e| OffersMessage::InvoiceError(e.into()))
- .and_then(|invoice|
- match invoice.sign(|invoice| self.node_signer.sign_bolt12_invoice(invoice)) {
- Ok(invoice) => Ok(OffersMessage::Invoice(invoice)),
- Err(SignError::Signing(())) => Err(OffersMessage::InvoiceError(
- InvoiceError::from_string("Failed signing invoice".to_string())
- )),
- Err(SignError::Verification(_)) => Err(OffersMessage::InvoiceError(
- InvoiceError::from_string("Failed invoice signature verification".to_string())
- )),
- });
- match response {
- Ok(invoice) => Some(invoice),
- Err(error) => Some(error),
- }
+ builder
+ .map(InvoiceBuilder::<ExplicitSigningPubkey>::from)
+ .and_then(|builder| builder.allow_mpp().build())
+ .map_err(InvoiceError::from)
+ .and_then(|invoice| {
+ #[cfg(c_bindings)]
+ let mut invoice = invoice;
+ invoice
+ .sign(|invoice: &UnsignedBolt12Invoice|
+ self.node_signer.sign_bolt12_invoice(invoice)
+ )
+ .map_err(InvoiceError::from)
+ })
+ };
+
+ match response {
+ Ok(invoice) => Some(OffersMessage::Invoice(invoice)),
+ Err(error) => Some(OffersMessage::InvoiceError(error.into())),
}
},
OffersMessage::Invoice(invoice) => {
- match invoice.verify(expanded_key, secp_ctx) {
- Err(()) => {
- Some(OffersMessage::InvoiceError(InvoiceError::from_string("Unrecognized invoice".to_owned())))
- },
- Ok(_) if invoice.invoice_features().requires_unknown_bits_from(&self.bolt12_invoice_features()) => {
- Some(OffersMessage::InvoiceError(Bolt12SemanticError::UnknownRequiredFeatures.into()))
- },
- Ok(payment_id) => {
- if let Err(e) = self.send_payment_for_bolt12_invoice(&invoice, payment_id) {
- log_trace!(self.logger, "Failed paying invoice: {:?}", e);
- Some(OffersMessage::InvoiceError(InvoiceError::from_string(format!("{:?}", e))))
+ let response = invoice
+ .verify(expanded_key, secp_ctx)
+ .map_err(|()| InvoiceError::from_string("Unrecognized invoice".to_owned()))
+ .and_then(|payment_id| {
+ let features = self.bolt12_invoice_features();
+ if invoice.invoice_features().requires_unknown_bits_from(&features) {
+ Err(InvoiceError::from(Bolt12SemanticError::UnknownRequiredFeatures))
} else {
- None
+ self.send_payment_for_bolt12_invoice(&invoice, payment_id)
+ .map_err(|e| {
+ log_trace!(self.logger, "Failed paying invoice: {:?}", e);
+ InvoiceError::from_string(format!("{:?}", e))
+ })
}
- },
+ });
+
+ match response {
+ Ok(()) => None,
+ Err(e) => Some(OffersMessage::InvoiceError(e)),
}
},
OffersMessage::InvoiceError(invoice_error) => {
}
}
+impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref>
+NodeIdLookUp for ChannelManager<M, T, ES, NS, SP, F, R, L>
+where
+ M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
+ T::Target: BroadcasterInterface,
+ ES::Target: EntropySource,
+ NS::Target: NodeSigner,
+ SP::Target: SignerProvider,
+ F::Target: FeeEstimator,
+ R::Target: Router,
+ L::Target: Logger,
+{
+ /// Returns the public key recorded for `short_channel_id` in our `short_to_chan_info` map,
+ /// or `None` if we know of no channel with that SCID. Takes the map's read lock only;
+ /// presumably the stored key is the channel counterparty's node id — confirm against the
+ /// `NodeIdLookUp` trait's contract.
+ fn next_node_id(&self, short_channel_id: u64) -> Option<PublicKey> {
+ self.short_to_chan_info.read().unwrap().get(&short_channel_id).map(|(pubkey, _)| *pubkey)
+ }
+}
+
/// Fetches the set of [`NodeFeatures`] flags that are provided by or required by
/// [`ChannelManager`].
pub(crate) fn provided_node_features(config: &UserConfig) -> NodeFeatures {
(37, user_channel_id_high_opt, option),
(39, self.feerate_sat_per_1000_weight, option),
(41, self.channel_shutdown_state, option),
+ (43, self.pending_inbound_htlcs, optional_vec),
+ (45, self.pending_outbound_htlcs, optional_vec),
});
Ok(())
}
(37, user_channel_id_high_opt, option),
(39, feerate_sat_per_1000_weight, option),
(41, channel_shutdown_state, option),
+ (43, pending_inbound_htlcs, optional_vec),
+ (45, pending_outbound_htlcs, optional_vec),
});
// `user_channel_id` used to be a single u64 value. In order to remain backwards compatible with
inbound_htlc_maximum_msat,
feerate_sat_per_1000_weight,
channel_shutdown_state,
+ pending_inbound_htlcs: pending_inbound_htlcs.unwrap_or(Vec::new()),
+ pending_outbound_htlcs: pending_outbound_htlcs.unwrap_or(Vec::new()),
})
}
}
(3, payment_metadata, option),
(5, custom_tlvs, optional_vec),
(7, requires_blinded_error, (default_value, false)),
+ (9, payment_context, option),
},
(2, ReceiveKeysend) => {
(0, payment_preimage, required),
+ (1, requires_blinded_error, (default_value, false)),
(2, incoming_cltv_expiry, required),
(3, payment_metadata, option),
(4, payment_data, option), // Added in 0.0.116
(4, htlc_id, required),
(6, incoming_packet_shared_secret, required),
(7, user_channel_id, option),
+ // Note that by the time we get past the required read for type 2 above, outpoint will be
+ // filled in, so we can safely unwrap it here.
+ (9, channel_id, (default_value, ChannelId::v1_from_funding_outpoint(outpoint.0.unwrap()))),
});
impl Writeable for ClaimableHTLC {
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
- let (payment_data, keysend_preimage) = match &self.onion_payload {
- OnionPayload::Invoice { _legacy_hop_data } => (_legacy_hop_data.as_ref(), None),
- OnionPayload::Spontaneous(preimage) => (None, Some(preimage)),
+ let (payment_data, keysend_preimage, payment_context) = match &self.onion_payload {
+ OnionPayload::Invoice { _legacy_hop_data, payment_context } => {
+ (_legacy_hop_data.as_ref(), None, payment_context.as_ref())
+ },
+ OnionPayload::Spontaneous(preimage) => (None, Some(preimage), None),
};
write_tlv_fields!(writer, {
(0, self.prev_hop, required),
(6, self.cltv_expiry, required),
(8, keysend_preimage, option),
(10, self.counterparty_skimmed_fee_msat, option),
+ (11, payment_context, option),
});
Ok(())
}
(6, cltv_expiry, required),
(8, keysend_preimage, option),
(10, counterparty_skimmed_fee_msat, option),
+ (11, payment_context, option),
});
let payment_data: Option<msgs::FinalOnionHopData> = payment_data_opt;
let value = value_ser.0.unwrap();
}
total_msat = Some(payment_data.as_ref().unwrap().total_msat);
}
- OnionPayload::Invoice { _legacy_hop_data: payment_data }
+ OnionPayload::Invoice { _legacy_hop_data: payment_data, payment_context }
},
};
Ok(Self {
(2, prev_short_channel_id, required),
(4, prev_htlc_id, required),
(6, prev_funding_outpoint, required),
+ // Note that by the time we get past the required read for type 6 above, prev_funding_outpoint will be
+ // filled in, so we can safely unwrap it here.
+ (7, prev_channel_id, (default_value, ChannelId::v1_from_funding_outpoint(prev_funding_outpoint.0.unwrap()))),
});
impl Writeable for HTLCForwardInfo {
self.chain_hash.write(writer)?;
{
let best_block = self.best_block.read().unwrap();
- best_block.height().write(writer)?;
- best_block.block_hash().write(writer)?;
+ best_block.height.write(writer)?;
+ best_block.block_hash.write(writer)?;
}
+ let per_peer_state = self.per_peer_state.write().unwrap();
+
let mut serializable_peer_count: u64 = 0;
{
- let per_peer_state = self.per_peer_state.read().unwrap();
let mut number_of_funded_channels = 0;
for (_, peer_state_mutex) in per_peer_state.iter() {
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
}
}
- let per_peer_state = self.per_peer_state.write().unwrap();
+ let mut decode_update_add_htlcs_opt = None;
+ let decode_update_add_htlcs = self.decode_update_add_htlcs.lock().unwrap();
+ if !decode_update_add_htlcs.is_empty() {
+ decode_update_add_htlcs_opt = Some(decode_update_add_htlcs);
+ }
let pending_inbound_payments = self.pending_inbound_payments.lock().unwrap();
let claimable_payments = self.claimable_payments.lock().unwrap();
}
// Encode without retry info for 0.0.101 compatibility.
- let mut pending_outbound_payments_no_retry: HashMap<PaymentId, HashSet<[u8; 32]>> = HashMap::new();
+ let mut pending_outbound_payments_no_retry: HashMap<PaymentId, HashSet<[u8; 32]>> = new_hash_map();
for (id, outbound) in pending_outbound_payments.iter() {
match outbound {
PendingOutboundPayment::Legacy { session_privs } |
for ((counterparty_id, _), peer_state) in per_peer_state.iter().zip(peer_states.iter()) {
for (funding_outpoint, updates) in peer_state.in_flight_monitor_updates.iter() {
if !updates.is_empty() {
- if in_flight_monitor_updates.is_none() { in_flight_monitor_updates = Some(HashMap::new()); }
+ if in_flight_monitor_updates.is_none() { in_flight_monitor_updates = Some(new_hash_map()); }
in_flight_monitor_updates.as_mut().unwrap().insert((counterparty_id, funding_outpoint), updates);
}
}
(10, in_flight_monitor_updates, option),
(11, self.probing_cookie_secret, required),
(13, htlc_onion_fields, optional_vec),
+ (14, decode_update_add_htlcs_opt, option),
});
Ok(())
mut channel_monitors: Vec<&'a mut ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>>) -> Self {
Self {
entropy_source, node_signer, signer_provider, fee_estimator, chain_monitor, tx_broadcaster, router, logger, default_config,
- channel_monitors: channel_monitors.drain(..).map(|monitor| { (monitor.get_funding_txo().0, monitor) }).collect()
+ channel_monitors: hash_map_from_iter(
+ channel_monitors.drain(..).map(|monitor| { (monitor.get_funding_txo().0, monitor) })
+ ),
}
}
}
let mut failed_htlcs = Vec::new();
let channel_count: u64 = Readable::read(reader)?;
- let mut funding_txo_set = HashSet::with_capacity(cmp::min(channel_count as usize, 128));
- let mut funded_peer_channels: HashMap<PublicKey, HashMap<ChannelId, ChannelPhase<SP>>> = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
- let mut outpoint_to_peer = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
- let mut short_to_chan_info = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
+ let mut funding_txo_set = hash_set_with_capacity(cmp::min(channel_count as usize, 128));
+ let mut funded_peer_channels: HashMap<PublicKey, HashMap<ChannelId, ChannelPhase<SP>>> = hash_map_with_capacity(cmp::min(channel_count as usize, 128));
+ let mut outpoint_to_peer = hash_map_with_capacity(cmp::min(channel_count as usize, 128));
+ let mut short_to_chan_info = hash_map_with_capacity(cmp::min(channel_count as usize, 128));
let mut channel_closures = VecDeque::new();
let mut close_background_events = Vec::new();
+ let mut funding_txo_to_channel_id = hash_map_with_capacity(channel_count as usize);
for _ in 0..channel_count {
let mut channel: Channel<SP> = Channel::read(reader, (
&args.entropy_source, &args.signer_provider, best_block_height, &provided_channel_type_features(&args.default_config)
))?;
let logger = WithChannelContext::from(&args.logger, &channel.context);
let funding_txo = channel.context.get_funding_txo().ok_or(DecodeError::InvalidValue)?;
+ funding_txo_to_channel_id.insert(funding_txo, channel.context.channel_id());
funding_txo_set.insert(funding_txo.clone());
if let Some(ref mut monitor) = args.channel_monitors.get_mut(&funding_txo) {
if channel.get_cur_holder_commitment_transaction_number() > monitor.get_cur_holder_commitment_number() ||
if shutdown_result.unbroadcasted_batch_funding_txid.is_some() {
return Err(DecodeError::InvalidValue);
}
- if let Some((counterparty_node_id, funding_txo, update)) = shutdown_result.monitor_update {
+ if let Some((counterparty_node_id, funding_txo, channel_id, update)) = shutdown_result.monitor_update {
close_background_events.push(BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
- counterparty_node_id, funding_txo, update
+ counterparty_node_id, funding_txo, channel_id, update
});
}
failed_htlcs.append(&mut shutdown_result.dropped_outbound_htlcs);
by_id_map.insert(channel.context.channel_id(), ChannelPhase::Funded(channel));
},
hash_map::Entry::Vacant(entry) => {
- let mut by_id_map = HashMap::new();
+ let mut by_id_map = new_hash_map();
by_id_map.insert(channel.context.channel_id(), ChannelPhase::Funded(channel));
entry.insert(by_id_map);
}
for (funding_txo, monitor) in args.channel_monitors.iter() {
if !funding_txo_set.contains(funding_txo) {
let logger = WithChannelMonitor::from(&args.logger, monitor);
+ let channel_id = monitor.channel_id();
log_info!(logger, "Queueing monitor update to ensure missing channel {} is force closed",
- &funding_txo.to_channel_id());
+ &channel_id);
let monitor_update = ChannelMonitorUpdate {
update_id: CLOSED_CHANNEL_UPDATE_ID,
counterparty_node_id: None,
updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast: true }],
+ channel_id: Some(monitor.channel_id()),
};
- close_background_events.push(BackgroundEvent::ClosedMonitorUpdateRegeneratedOnStartup((*funding_txo, monitor_update)));
+ close_background_events.push(BackgroundEvent::ClosedMonitorUpdateRegeneratedOnStartup((*funding_txo, channel_id, monitor_update)));
}
}
const MAX_ALLOC_SIZE: usize = 1024 * 64;
let forward_htlcs_count: u64 = Readable::read(reader)?;
- let mut forward_htlcs = HashMap::with_capacity(cmp::min(forward_htlcs_count as usize, 128));
+ let mut forward_htlcs = hash_map_with_capacity(cmp::min(forward_htlcs_count as usize, 128));
for _ in 0..forward_htlcs_count {
let short_channel_id = Readable::read(reader)?;
let pending_forwards_count: u64 = Readable::read(reader)?;
let peer_state_from_chans = |channel_by_id| {
PeerState {
channel_by_id,
- inbound_channel_request_by_id: HashMap::new(),
+ inbound_channel_request_by_id: new_hash_map(),
latest_features: InitFeatures::empty(),
pending_msg_events: Vec::new(),
in_flight_monitor_updates: BTreeMap::new(),
};
let peer_count: u64 = Readable::read(reader)?;
- let mut per_peer_state = HashMap::with_capacity(cmp::min(peer_count as usize, MAX_ALLOC_SIZE/mem::size_of::<(PublicKey, Mutex<PeerState<SP>>)>()));
+ let mut per_peer_state = hash_map_with_capacity(cmp::min(peer_count as usize, MAX_ALLOC_SIZE/mem::size_of::<(PublicKey, Mutex<PeerState<SP>>)>()));
for _ in 0..peer_count {
let peer_pubkey = Readable::read(reader)?;
- let peer_chans = funded_peer_channels.remove(&peer_pubkey).unwrap_or(HashMap::new());
+ let peer_chans = funded_peer_channels.remove(&peer_pubkey).unwrap_or(new_hash_map());
let mut peer_state = peer_state_from_chans(peer_chans);
peer_state.latest_features = Readable::read(reader)?;
per_peer_state.insert(peer_pubkey, Mutex::new(peer_state));
let highest_seen_timestamp: u32 = Readable::read(reader)?;
let pending_inbound_payment_count: u64 = Readable::read(reader)?;
- let mut pending_inbound_payments: HashMap<PaymentHash, PendingInboundPayment> = HashMap::with_capacity(cmp::min(pending_inbound_payment_count as usize, MAX_ALLOC_SIZE/(3*32)));
+ let mut pending_inbound_payments: HashMap<PaymentHash, PendingInboundPayment> = hash_map_with_capacity(cmp::min(pending_inbound_payment_count as usize, MAX_ALLOC_SIZE/(3*32)));
for _ in 0..pending_inbound_payment_count {
if pending_inbound_payments.insert(Readable::read(reader)?, Readable::read(reader)?).is_some() {
return Err(DecodeError::InvalidValue);
let pending_outbound_payments_count_compat: u64 = Readable::read(reader)?;
let mut pending_outbound_payments_compat: HashMap<PaymentId, PendingOutboundPayment> =
- HashMap::with_capacity(cmp::min(pending_outbound_payments_count_compat as usize, MAX_ALLOC_SIZE/32));
+ hash_map_with_capacity(cmp::min(pending_outbound_payments_count_compat as usize, MAX_ALLOC_SIZE/32));
for _ in 0..pending_outbound_payments_count_compat {
let session_priv = Readable::read(reader)?;
let payment = PendingOutboundPayment::Legacy {
- session_privs: [session_priv].iter().cloned().collect()
+ session_privs: hash_set_from_iter([session_priv]),
};
if pending_outbound_payments_compat.insert(PaymentId(session_priv), payment).is_some() {
return Err(DecodeError::InvalidValue)
// pending_outbound_payments_no_retry is for compatibility with 0.0.101 clients.
let mut pending_outbound_payments_no_retry: Option<HashMap<PaymentId, HashSet<[u8; 32]>>> = None;
let mut pending_outbound_payments = None;
- let mut pending_intercepted_htlcs: Option<HashMap<InterceptId, PendingAddHTLCInfo>> = Some(HashMap::new());
+ let mut pending_intercepted_htlcs: Option<HashMap<InterceptId, PendingAddHTLCInfo>> = Some(new_hash_map());
let mut received_network_pubkey: Option<PublicKey> = None;
let mut fake_scid_rand_bytes: Option<[u8; 32]> = None;
let mut probing_cookie_secret: Option<[u8; 32]> = None;
let mut claimable_htlc_purposes = None;
let mut claimable_htlc_onion_fields = None;
- let mut pending_claiming_payments = Some(HashMap::new());
+ let mut pending_claiming_payments = Some(new_hash_map());
let mut monitor_update_blocked_actions_per_peer: Option<Vec<(_, BTreeMap<_, Vec<_>>)>> = Some(Vec::new());
let mut events_override = None;
let mut in_flight_monitor_updates: Option<HashMap<(PublicKey, OutPoint), Vec<ChannelMonitorUpdate>>> = None;
+ let mut decode_update_add_htlcs: Option<HashMap<u64, Vec<msgs::UpdateAddHTLC>>> = None;
read_tlv_fields!(reader, {
(1, pending_outbound_payments_no_retry, option),
(2, pending_intercepted_htlcs, option),
(10, in_flight_monitor_updates, option),
(11, probing_cookie_secret, option),
(13, claimable_htlc_onion_fields, optional_vec),
+ (14, decode_update_add_htlcs, option),
});
+ let mut decode_update_add_htlcs = decode_update_add_htlcs.unwrap_or_else(|| new_hash_map());
if fake_scid_rand_bytes.is_none() {
fake_scid_rand_bytes = Some(args.entropy_source.get_secure_random_bytes());
}
if pending_outbound_payments.is_none() && pending_outbound_payments_no_retry.is_none() {
pending_outbound_payments = Some(pending_outbound_payments_compat);
} else if pending_outbound_payments.is_none() {
- let mut outbounds = HashMap::new();
+ let mut outbounds = new_hash_map();
for (id, session_privs) in pending_outbound_payments_no_retry.unwrap().drain() {
outbounds.insert(id, PendingOutboundPayment::Legacy { session_privs });
}
$chan_in_flight_upds.retain(|upd| upd.update_id > $monitor.get_latest_update_id());
for update in $chan_in_flight_upds.iter() {
log_trace!($logger, "Replaying ChannelMonitorUpdate {} for {}channel {}",
- update.update_id, $channel_info_log, &$funding_txo.to_channel_id());
+ update.update_id, $channel_info_log, &$monitor.channel_id());
max_in_flight_update_id = cmp::max(max_in_flight_update_id, update.update_id);
pending_background_events.push(
BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
counterparty_node_id: $counterparty_node_id,
funding_txo: $funding_txo,
+ channel_id: $monitor.channel_id(),
update: update.clone(),
});
}
pending_background_events.push(
BackgroundEvent::MonitorUpdatesComplete {
counterparty_node_id: $counterparty_node_id,
- channel_id: $funding_txo.to_channel_id(),
+ channel_id: $monitor.channel_id(),
});
}
if $peer_state.in_flight_monitor_updates.insert($funding_txo, $chan_in_flight_upds).is_some() {
}
}
if chan.get_latest_unblocked_monitor_update_id() > max_in_flight_update_id {
- // If the channel is ahead of the monitor, return InvalidValue:
+ // If the channel is ahead of the monitor, return DangerousValue:
log_error!(logger, "A ChannelMonitor is stale compared to the current ChannelManager! This indicates a potentially-critical violation of the chain::Watch API!");
log_error!(logger, " The ChannelMonitor for channel {} is at update_id {} with update_id through {} in-flight",
chan.context.channel_id(), monitor.get_latest_update_id(), max_in_flight_update_id);
log_error!(logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!");
log_error!(logger, " Without the latest ChannelMonitor we cannot continue without risking funds.");
log_error!(logger, " Please ensure the chain::Watch API requirements are met and file a bug report at https://github.com/lightningdevkit/rust-lightning");
- return Err(DecodeError::InvalidValue);
+ return Err(DecodeError::DangerousValue);
}
} else {
// We shouldn't have persisted (or read) any unfunded channel types so none should have been
if let Some(in_flight_upds) = in_flight_monitor_updates {
for ((counterparty_id, funding_txo), mut chan_in_flight_updates) in in_flight_upds {
- let logger = WithContext::from(&args.logger, Some(counterparty_id), Some(funding_txo.to_channel_id()));
+ let channel_id = funding_txo_to_channel_id.get(&funding_txo).copied();
+ let logger = WithContext::from(&args.logger, Some(counterparty_id), channel_id);
if let Some(monitor) = args.channel_monitors.get(&funding_txo) {
// Now that we've removed all the in-flight monitor updates for channels that are
// still open, we need to replay any monitor updates that are for closed channels,
 // creating the necessary peer_state entries as we go.
let peer_state_mutex = per_peer_state.entry(counterparty_id).or_insert_with(|| {
- Mutex::new(peer_state_from_chans(HashMap::new()))
+ Mutex::new(peer_state_from_chans(new_hash_map()))
});
let mut peer_state = peer_state_mutex.lock().unwrap();
handle_in_flight_updates!(counterparty_id, chan_in_flight_updates,
funding_txo, monitor, peer_state, logger, "closed ");
} else {
log_error!(logger, "A ChannelMonitor is missing even though we have in-flight updates for it! This indicates a potentially-critical violation of the chain::Watch API!");
- log_error!(logger, " The ChannelMonitor for channel {} is missing.",
- &funding_txo.to_channel_id());
+ log_error!(logger, " The ChannelMonitor for channel {} is missing.", if let Some(channel_id) =
+ channel_id { channel_id.to_string() } else { format!("with outpoint {}", funding_txo) } );
log_error!(logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,");
log_error!(logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!");
log_error!(logger, " Without the latest ChannelMonitor we cannot continue without risking funds.");
log_error!(logger, " Please ensure the chain::Watch API requirements are met and file a bug report at https://github.com/lightningdevkit/rust-lightning");
+ log_error!(logger, " Pending in-flight updates are: {:?}", chan_in_flight_updates);
return Err(DecodeError::InvalidValue);
}
}
retry_strategy: None,
attempts: PaymentAttempts::new(),
payment_params: None,
- session_privs: [session_priv_bytes].iter().map(|a| *a).collect(),
+ session_privs: hash_set_from_iter([session_priv_bytes]),
payment_hash: htlc.payment_hash,
payment_secret: None, // only used for retries, and we'll never retry on startup
payment_metadata: None, // only used for retries, and we'll never retry on startup
// still have an entry for this HTLC in `forward_htlcs` or
// `pending_intercepted_htlcs`, we were apparently not persisted after
// the monitor was when forwarding the payment.
+ decode_update_add_htlcs.retain(|scid, update_add_htlcs| {
+ update_add_htlcs.retain(|update_add_htlc| {
+ let matches = *scid == prev_hop_data.short_channel_id &&
+ update_add_htlc.htlc_id == prev_hop_data.htlc_id;
+ if matches {
+ log_info!(logger, "Removing pending to-decode HTLC with hash {} as it was forwarded to the closed channel {}",
+ &htlc.payment_hash, &monitor.channel_id());
+ }
+ !matches
+ });
+ !update_add_htlcs.is_empty()
+ });
forward_htlcs.retain(|_, forwards| {
forwards.retain(|forward| {
if let HTLCForwardInfo::AddHTLC(htlc_info) = forward {
if pending_forward_matches_htlc(&htlc_info) {
log_info!(logger, "Removing pending to-forward HTLC with hash {} as it was forwarded to the closed channel {}",
- &htlc.payment_hash, &monitor.get_funding_txo().0.to_channel_id());
+ &htlc.payment_hash, &monitor.channel_id());
false
} else { true }
} else { true }
pending_intercepted_htlcs.as_mut().unwrap().retain(|intercepted_id, htlc_info| {
if pending_forward_matches_htlc(&htlc_info) {
log_info!(logger, "Removing pending intercepted HTLC with hash {} as it was forwarded to the closed channel {}",
- &htlc.payment_hash, &monitor.get_funding_txo().0.to_channel_id());
+ &htlc.payment_hash, &monitor.channel_id());
pending_events_read.retain(|(event, _)| {
if let Event::HTLCIntercepted { intercept_id: ev_id, .. } = event {
intercepted_id != ev_id
let compl_action =
EventCompletionAction::ReleaseRAAChannelMonitorUpdate {
channel_funding_outpoint: monitor.get_funding_txo().0,
+ channel_id: monitor.channel_id(),
counterparty_node_id: path.hops[0].pubkey,
};
pending_outbounds.claim_htlc(payment_id, preimage, session_priv,
// channel_id -> peer map entry).
counterparty_opt.is_none(),
counterparty_opt.cloned().or(monitor.get_counterparty_node_id()),
- monitor.get_funding_txo().0))
+ monitor.get_funding_txo().0, monitor.channel_id()))
} else { None }
} else {
// If it was an outbound payment, we've handled it above - if a preimage
}
}
- if !forward_htlcs.is_empty() || pending_outbounds.needs_abandon() {
+ if !forward_htlcs.is_empty() || !decode_update_add_htlcs.is_empty() || pending_outbounds.needs_abandon() {
// If we have pending HTLCs to forward, assume we either dropped a
// `PendingHTLCsForwardable` or the user received it but never processed it as they
// shut down before the timer hit. Either way, set the time_forwardable to a small
let inbound_pmt_key_material = args.node_signer.get_inbound_payment_key_material();
let expanded_inbound_key = inbound_payment::ExpandedKey::new(&inbound_pmt_key_material);
- let mut claimable_payments = HashMap::with_capacity(claimable_htlcs_list.len());
+ let mut claimable_payments = hash_map_with_capacity(claimable_htlcs_list.len());
if let Some(purposes) = claimable_htlc_purposes {
if purposes.len() != claimable_htlcs_list.len() {
return Err(DecodeError::InvalidValue);
return Err(DecodeError::InvalidValue);
}
let purpose = match &htlcs[0].onion_payload {
- OnionPayload::Invoice { _legacy_hop_data } => {
+ OnionPayload::Invoice { _legacy_hop_data, payment_context: _ } => {
if let Some(hop_data) = _legacy_hop_data {
- events::PaymentPurpose::InvoicePayment {
+ events::PaymentPurpose::Bolt11InvoicePayment {
payment_preimage: match pending_inbound_payments.get(&payment_hash) {
Some(inbound_payment) => inbound_payment.payment_preimage,
None => match inbound_payment::verify(payment_hash, &hop_data, 0, &expanded_inbound_key, &args.logger) {
}
}
- let mut outbound_scid_aliases = HashSet::new();
+ let mut outbound_scid_aliases = new_hash_set();
for (_peer_node_id, peer_state_mutex) in per_peer_state.iter_mut() {
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
// this channel as well. On the flip side, there's no harm in restarting
// without the new monitor persisted - we'll end up right back here on
// restart.
- let previous_channel_id = claimable_htlc.prev_hop.outpoint.to_channel_id();
+ let previous_channel_id = claimable_htlc.prev_hop.channel_id;
if let Some(peer_node_id) = outpoint_to_peer.get(&claimable_htlc.prev_hop.outpoint) {
let peer_state_mutex = per_peer_state.get(peer_node_id).unwrap();
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
for action in actions.iter() {
if let MonitorUpdateCompletionAction::EmitEventAndFreeOtherChannel {
downstream_counterparty_and_funding_outpoint:
- Some((blocked_node_id, blocked_channel_outpoint, blocking_action)), ..
+ Some((blocked_node_id, _blocked_channel_outpoint, blocked_channel_id, blocking_action)), ..
} = action {
- if let Some(blocked_peer_state) = per_peer_state.get(&blocked_node_id) {
+ if let Some(blocked_peer_state) = per_peer_state.get(blocked_node_id) {
log_trace!(logger,
"Holding the next revoke_and_ack from {} until the preimage is durably persisted in the inbound edge's ChannelMonitor",
- blocked_channel_outpoint.to_channel_id());
+ blocked_channel_id);
blocked_peer_state.lock().unwrap().actions_blocking_raa_monitor_updates
- .entry(blocked_channel_outpoint.to_channel_id())
+ .entry(*blocked_channel_id)
.or_insert_with(Vec::new).push(blocking_action.clone());
} else {
// If the channel we were blocking has closed, we don't need to
pending_intercepted_htlcs: Mutex::new(pending_intercepted_htlcs.unwrap()),
forward_htlcs: Mutex::new(forward_htlcs),
+ decode_update_add_htlcs: Mutex::new(decode_update_add_htlcs),
claimable_payments: Mutex::new(ClaimablePayments { claimable_payments, pending_claiming_payments: pending_claiming_payments.unwrap() }),
outbound_scid_aliases: Mutex::new(outbound_scid_aliases),
outpoint_to_peer: Mutex::new(outpoint_to_peer),
pending_offers_messages: Mutex::new(Vec::new()),
+ pending_broadcast_messages: Mutex::new(Vec::new()),
+
entropy_source: args.entropy_source,
node_signer: args.node_signer,
signer_provider: args.signer_provider,
channel_manager.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver);
}
- for (source, preimage, downstream_value, downstream_closed, downstream_node_id, downstream_funding) in pending_claims_to_replay {
+ for (source, preimage, downstream_value, downstream_closed, downstream_node_id, downstream_funding, downstream_channel_id) in pending_claims_to_replay {
// We use `downstream_closed` in place of `from_onchain` here just as a guess - we
// don't remember in the `ChannelMonitor` where we got a preimage from, but if the
// channel is closed we just assume that it probably came from an on-chain claim.
- channel_manager.claim_funds_internal(source, preimage, Some(downstream_value),
- downstream_closed, true, downstream_node_id, downstream_funding);
+ channel_manager.claim_funds_internal(source, preimage, Some(downstream_value), None,
+ downstream_closed, true, downstream_node_id, downstream_funding,
+ downstream_channel_id, None
+ );
}
//TODO: Broadcast channel update for closed channels, but only after we've made a
}
}
+ // Tests that the ChannelUpdate generated on force-close is cached in
+ // `pending_broadcast_messages` rather than broadcast immediately, that it is not drained
+ // while we have no connected peers, and that it is released (and the cache cleared) once
+ // any peer connects.
+ #[test]
+ fn test_channel_update_cached() {
+ let chanmon_cfgs = create_chanmon_cfgs(3);
+ let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+ let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+
+ let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
+
+ nodes[0].node.force_close_channel_with_peer(&chan.2, &nodes[1].node.get_our_node_id(), None, true).unwrap();
+ check_added_monitors!(nodes[0], 1);
+ check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
+
+ // Confirm that the channel_update was not sent immediately to node[1] but was cached.
+ let node_1_events = nodes[1].node.get_and_clear_pending_msg_events();
+ assert_eq!(node_1_events.len(), 0);
+
+ {
+ // Assert that ChannelUpdate message has been added to node[0] pending broadcast messages
+ let pending_broadcast_messages = nodes[0].node.pending_broadcast_messages.lock().unwrap();
+ assert_eq!(pending_broadcast_messages.len(), 1);
+ }
+
+ // Test that we do not retrieve the pending broadcast messages when we are not connected to any peer
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
+
+ nodes[0].node.peer_disconnected(&nodes[2].node.get_our_node_id());
+ nodes[2].node.peer_disconnected(&nodes[0].node.get_our_node_id());
+
+ let node_0_events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(node_0_events.len(), 0);
+
+ // Now we reconnect to a peer
+ nodes[0].node.peer_connected(&nodes[2].node.get_our_node_id(), &msgs::Init {
+ features: nodes[2].node.init_features(), networks: None, remote_network_address: None
+ }, true).unwrap();
+ nodes[2].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
+ features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+ }, false).unwrap();
+
+ // Confirm that get_and_clear_pending_msg_events correctly captures pending broadcast messages
+ let node_0_events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(node_0_events.len(), 1);
+ match &node_0_events[0] {
+ MessageSendEvent::BroadcastChannelUpdate { .. } => (),
+ _ => panic!("Unexpected event"),
+ }
+ {
+ // Assert that ChannelUpdate message has been cleared from nodes[0] pending broadcast messages
+ let pending_broadcast_messages = nodes[0].node.pending_broadcast_messages.lock().unwrap();
+ assert_eq!(pending_broadcast_messages.len(), 0);
+ }
+ }
+
#[test]
fn test_drop_disconnected_peers_when_removing_channels() {
let chanmon_cfgs = create_chanmon_cfgs(2);
}
let (_nodes_1_update, _none) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());
- check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 1000000);
- check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 1000000);
+ check_closed_event!(nodes[0], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 1000000);
+ check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 1000000);
}
fn check_not_connected_to_peer_error<T>(res_err: Result<T, APIError>, expected_public_key: PublicKey) {
check_added_monitors!(nodes[0], 1);
expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id());
}
- open_channel_msg.temporary_channel_id = ChannelId::temporary_from_entropy_source(&nodes[0].keys_manager);
+ open_channel_msg.common_fields.temporary_channel_id = ChannelId::temporary_from_entropy_source(&nodes[0].keys_manager);
}
// A MAX_UNFUNDED_CHANS_PER_PEER + 1 channel will be summarily rejected
- open_channel_msg.temporary_channel_id = ChannelId::temporary_from_entropy_source(&nodes[0].keys_manager);
+ open_channel_msg.common_fields.temporary_channel_id = ChannelId::temporary_from_entropy_source(
+ &nodes[0].keys_manager);
nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg);
assert_eq!(get_err_msg(&nodes[1], &nodes[0].node.get_our_node_id()).channel_id,
- open_channel_msg.temporary_channel_id);
+ open_channel_msg.common_fields.temporary_channel_id);
// Further, because all of our channels with nodes[0] are inbound, and none of them funded,
// it doesn't count as a "protected" peer, i.e. it counts towards the MAX_NO_CHANNEL_PEERS
for i in 0..super::MAX_UNFUNDED_CHANNEL_PEERS - 1 {
nodes[1].node.handle_open_channel(&peer_pks[i], &open_channel_msg);
get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, peer_pks[i]);
- open_channel_msg.temporary_channel_id = ChannelId::temporary_from_entropy_source(&nodes[0].keys_manager);
+ open_channel_msg.common_fields.temporary_channel_id = ChannelId::temporary_from_entropy_source(&nodes[0].keys_manager);
}
nodes[1].node.handle_open_channel(&last_random_pk, &open_channel_msg);
assert_eq!(get_err_msg(&nodes[1], &last_random_pk).channel_id,
- open_channel_msg.temporary_channel_id);
+ open_channel_msg.common_fields.temporary_channel_id);
// Of course, however, outbound channels are always allowed
nodes[1].node.create_channel(last_random_pk, 100_000, 0, 42, None, None).unwrap();
for _ in 0..super::MAX_UNFUNDED_CHANS_PER_PEER {
nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg);
get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
- open_channel_msg.temporary_channel_id = ChannelId::temporary_from_entropy_source(&nodes[0].keys_manager);
+ open_channel_msg.common_fields.temporary_channel_id = ChannelId::temporary_from_entropy_source(&nodes[0].keys_manager);
}
// Once we have MAX_UNFUNDED_CHANS_PER_PEER unfunded channels, new inbound channels will be
// rejected.
nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg);
assert_eq!(get_err_msg(&nodes[1], &nodes[0].node.get_our_node_id()).channel_id,
- open_channel_msg.temporary_channel_id);
+ open_channel_msg.common_fields.temporary_channel_id);
// but we can still open an outbound channel.
nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 100_000, 0, 42, None, None).unwrap();
// but even with such an outbound channel, additional inbound channels will still fail.
nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg);
assert_eq!(get_err_msg(&nodes[1], &nodes[0].node.get_our_node_id()).channel_id,
- open_channel_msg.temporary_channel_id);
+ open_channel_msg.common_fields.temporary_channel_id);
}
#[test]
_ => panic!("Unexpected event"),
}
get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, random_pk);
- open_channel_msg.temporary_channel_id = ChannelId::temporary_from_entropy_source(&nodes[0].keys_manager);
+ open_channel_msg.common_fields.temporary_channel_id = ChannelId::temporary_from_entropy_source(&nodes[0].keys_manager);
}
// If we try to accept a channel from another peer non-0conf it will fail.
_ => panic!("Unexpected event"),
}
assert_eq!(get_err_msg(&nodes[1], &last_random_pk).channel_id,
- open_channel_msg.temporary_channel_id);
+ open_channel_msg.common_fields.temporary_channel_id);
// ...however if we accept the same channel 0conf it should work just fine.
nodes[1].node.handle_open_channel(&last_random_pk, &open_channel_msg);
};
// Check that if the amount we received + the penultimate hop extra fee is less than the sender
// intended amount, we fail the payment.
- let current_height: u32 = node[0].node.best_block.read().unwrap().height();
+ let current_height: u32 = node[0].node.best_block.read().unwrap().height;
if let Err(crate::ln::channelmanager::InboundHTLCErr { err_code, .. }) =
create_recv_pending_htlc_info(hop_data, [0; 32], PaymentHash([0; 32]),
sender_intended_amt_msat - extra_fee_msat - 1, 42, None, true, Some(extra_fee_msat),
}),
custom_tlvs: Vec::new(),
};
- let current_height: u32 = node[0].node.best_block.read().unwrap().height();
+ let current_height: u32 = node[0].node.best_block.read().unwrap().height;
assert!(create_recv_pending_htlc_info(hop_data, [0; 32], PaymentHash([0; 32]),
sender_intended_amt_msat - extra_fee_msat, 42, None, true, Some(extra_fee_msat),
current_height, node[0].node.default_configuration.accept_mpp_keysend).is_ok());
let node_chanmgr = create_node_chanmgrs(1, &node_cfg, &[None]);
let node = create_network(1, &node_cfg, &node_chanmgr);
- let current_height: u32 = node[0].node.best_block.read().unwrap().height();
+ let current_height: u32 = node[0].node.best_block.read().unwrap().height;
let result = create_recv_pending_htlc_info(msgs::InboundOnionPayload::Receive {
sender_intended_htlc_amt_msat: 100,
cltv_expiry_height: 22,
nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 0, None, None).unwrap();
let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
- assert!(open_channel_msg.channel_type.as_ref().unwrap().supports_anchors_zero_fee_htlc_tx());
+ assert!(open_channel_msg.common_fields.channel_type.as_ref().unwrap().supports_anchors_zero_fee_htlc_tx());
nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg);
let events = nodes[1].node.get_and_clear_pending_events();
nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &error_msg);
let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
- assert!(!open_channel_msg.channel_type.unwrap().supports_anchors_zero_fee_htlc_tx());
+ assert!(!open_channel_msg.common_fields.channel_type.unwrap().supports_anchors_zero_fee_htlc_tx());
// Since nodes[1] should not have accepted the channel, it should
// not have generated any events.
let (scid_1, scid_2) = (42, 43);
- let mut forward_htlcs = HashMap::new();
+ let mut forward_htlcs = new_hash_map();
forward_htlcs.insert(scid_1, dummy_htlcs_1.clone());
forward_htlcs.insert(scid_2, dummy_htlcs_2.clone());
use bitcoin::blockdata::locktime::absolute::LockTime;
use bitcoin::hashes::Hash;
use bitcoin::hashes::sha256::Hash as Sha256;
- use bitcoin::{Block, Transaction, TxOut};
+ use bitcoin::{Transaction, TxOut};
use crate::sync::{Arc, Mutex, RwLock};
let fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) };
let logger_a = test_utils::TestLogger::with_id("node a".to_owned());
let scorer = RwLock::new(test_utils::TestScorer::new());
- let router = test_utils::TestRouter::new(Arc::new(NetworkGraph::new(network, &logger_a)), &scorer);
+ let router = test_utils::TestRouter::new(Arc::new(NetworkGraph::new(network, &logger_a)), &logger_a, &scorer);
let mut config: UserConfig = Default::default();
config.channel_config.max_dust_htlc_exposure = MaxDustHTLCExposure::FeeRateMultiplier(5_000_000 / 253);
assert_eq!(&tx_broadcaster.txn_broadcasted.lock().unwrap()[..], &[tx.clone()]);
- let block = create_dummy_block(BestBlock::from_network(network).block_hash(), 42, vec![tx]);
+ let block = create_dummy_block(BestBlock::from_network(network).block_hash, 42, vec![tx]);
Listen::block_connected(&node_a, &block, 1);
Listen::block_connected(&node_b, &block, 1);
//! for more info).
//! - `Keysend` - send funds to a node without an invoice
//! (see the [`Keysend` feature assignment proposal](https://github.com/lightning/bolts/issues/605#issuecomment-606679798) for more information).
+//! - `Trampoline` - supports receiving and forwarding Trampoline payments
+//! (see the [`Trampoline` feature proposal](https://github.com/lightning/bolts/pull/836) for more information).
//!
//! LDK knows about the following features, but does not support them:
//! - `AnchorsNonzeroFeeHtlcTx` - the initial version of anchor outputs, which was later found to be
//! [BOLT #9]: https://github.com/lightning/bolts/blob/master/09-features.md
//! [messages]: crate::ln::msgs
-use crate::{io, io_extras};
+#[allow(unused_imports)]
use crate::prelude::*;
+
+use crate::{io, io_extras};
use core::{cmp, fmt};
use core::borrow::Borrow;
use core::hash::{Hash, Hasher};
use crate::util::ser::{Readable, WithoutLength, Writeable, Writer};
mod sealed {
+ #[allow(unused_imports)]
use crate::prelude::*;
use crate::ln::features::Features;
ChannelType | SCIDPrivacy,
// Byte 6
ZeroConf,
+ // Byte 7
+ Trampoline,
]);
define_context!(NodeContext, [
// Byte 0
ChannelType | SCIDPrivacy,
// Byte 6
ZeroConf | Keysend,
+ // Byte 7
+ Trampoline,
]);
define_context!(ChannelContext, []);
define_context!(Bolt11InvoiceContext, [
,
// Byte 6
PaymentMetadata,
+ // Byte 7
+ Trampoline,
]);
define_context!(OfferContext, []);
define_context!(InvoiceRequestContext, []);
define_feature!(55, Keysend, [NodeContext],
"Feature flags for keysend payments.", set_keysend_optional, set_keysend_required,
supports_keysend, requires_keysend);
+ define_feature!(57, Trampoline, [InitContext, NodeContext, Bolt11InvoiceContext],
+ "Feature flags for Trampoline routing.", set_trampoline_routing_optional, set_trampoline_routing_required,
+ supports_trampoline_routing, requires_trampoline_routing);
// Note: update the module-level docs when a new feature bit is added!
#[cfg(test)]
}
}
+impl<T: sealed::RouteBlinding> Features<T> {
+ #[cfg(test)]
+ pub(crate) fn clear_route_blinding(&mut self) {
+ <T as sealed::RouteBlinding>::clear_bits(&mut self.flags);
+ }
+}
+
#[cfg(test)]
impl<T: sealed::UnknownFeature> Features<T> {
pub(crate) fn unknown() -> Self {
//! nodes for functional tests.
use crate::chain::{BestBlock, ChannelMonitorUpdateStatus, Confirm, Listen, Watch, chainmonitor::Persist};
-use crate::sign::EntropySource;
use crate::chain::channelmonitor::ChannelMonitor;
use crate::chain::transaction::OutPoint;
use crate::events::{ClaimedHTLC, ClosureReason, Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider, PathFailure, PaymentPurpose, PaymentFailureReason};
use crate::events::bump_transaction::{BumpTransactionEvent, BumpTransactionEventHandler, Wallet, WalletSource};
use crate::ln::{ChannelId, PaymentPreimage, PaymentHash, PaymentSecret};
use crate::ln::channelmanager::{AChannelManager, ChainParameters, ChannelManager, ChannelManagerReadArgs, RAACommitmentOrder, PaymentSendFailure, RecipientOnionFields, PaymentId, MIN_CLTV_EXPIRY_DELTA};
-use crate::routing::gossip::{P2PGossipSync, NetworkGraph, NetworkUpdate};
-use crate::routing::router::{self, PaymentParameters, Route, RouteParameters};
use crate::ln::features::InitFeatures;
use crate::ln::msgs;
-use crate::ln::msgs::{ChannelMessageHandler,RoutingMessageHandler};
-use crate::util::test_channel_signer::TestChannelSigner;
+use crate::ln::msgs::{ChannelMessageHandler, OnionMessageHandler, RoutingMessageHandler};
+use crate::ln::peer_handler::IgnoringMessageHandler;
+use crate::onion_message::messenger::OnionMessenger;
+use crate::routing::gossip::{P2PGossipSync, NetworkGraph, NetworkUpdate};
+use crate::routing::router::{self, PaymentParameters, Route, RouteParameters};
+use crate::sign::{EntropySource, RandomBytes};
+use crate::util::config::{UserConfig, MaxDustHTLCExposure};
+use crate::util::errors::APIError;
+#[cfg(test)]
+use crate::util::logger::Logger;
use crate::util::scid_utils;
+use crate::util::test_channel_signer::TestChannelSigner;
use crate::util::test_utils;
use crate::util::test_utils::{panicking, TestChainMonitor, TestScorer, TestKeysInterface};
-use crate::util::errors::APIError;
-use crate::util::config::{UserConfig, MaxDustHTLCExposure};
use crate::util::ser::{ReadableArgs, Writeable};
-#[cfg(test)]
-use crate::util::logger::Logger;
use bitcoin::blockdata::block::{Block, Header, Version};
use bitcoin::blockdata::locktime::absolute::LockTime;
use bitcoin::pow::CompactTarget;
use bitcoin::secp256k1::{PublicKey, SecretKey};
+use alloc::rc::Rc;
+use core::cell::RefCell;
+use core::iter::repeat;
+use core::mem;
+use core::ops::Deref;
use crate::io;
use crate::prelude::*;
-use core::cell::RefCell;
-use alloc::rc::Rc;
use crate::sync::{Arc, Mutex, LockTestExt, RwLock};
-use core::mem;
-use core::iter::repeat;
pub const CHAN_CONFIRM_DEPTH: u32 = 10;
fn call_claimable_balances<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>) {
// Ensure `get_claimable_balances`' self-tests never panic
- for funding_outpoint in node.chain_monitor.chain_monitor.list_monitors() {
+ for (funding_outpoint, _channel_id) in node.chain_monitor.chain_monitor.list_monitors() {
node.chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances();
}
}
pub tx_broadcaster: &'a test_utils::TestBroadcaster,
pub fee_estimator: &'a test_utils::TestFeeEstimator,
pub router: test_utils::TestRouter<'a>,
+ pub message_router: test_utils::TestMessageRouter<'a>,
pub chain_monitor: test_utils::TestChainMonitor<'a>,
pub keys_manager: &'a test_utils::TestKeysInterface,
pub logger: &'a test_utils::TestLogger,
&'chan_mon_cfg test_utils::TestLogger,
>;
+type TestOnionMessenger<'chan_man, 'node_cfg, 'chan_mon_cfg> = OnionMessenger<
+ DedicatedEntropy,
+ &'node_cfg test_utils::TestKeysInterface,
+ &'chan_mon_cfg test_utils::TestLogger,
+ &'chan_man TestChannelManager<'node_cfg, 'chan_mon_cfg>,
+ &'node_cfg test_utils::TestMessageRouter<'chan_mon_cfg>,
+ &'chan_man TestChannelManager<'node_cfg, 'chan_mon_cfg>,
+ IgnoringMessageHandler,
+>;
+
+/// For use with [`OnionMessenger`], as otherwise `test_restored_packages_retry` will fail. This is
+/// because that test uses older serialized data produced by calling [`EntropySource`] in a specific
+/// manner. Using the same [`EntropySource`] with [`OnionMessenger`] would introduce another call,
+/// causing the produced data to no longer match.
+pub struct DedicatedEntropy(RandomBytes);
+
+impl Deref for DedicatedEntropy {
+ type Target = RandomBytes;
+ fn deref(&self) -> &Self::Target { &self.0 }
+}
+
pub struct Node<'chan_man, 'node_cfg: 'chan_man, 'chan_mon_cfg: 'node_cfg> {
pub chain_source: &'chan_mon_cfg test_utils::TestChainSource,
pub tx_broadcaster: &'chan_mon_cfg test_utils::TestBroadcaster,
pub chain_monitor: &'node_cfg test_utils::TestChainMonitor<'chan_mon_cfg>,
pub keys_manager: &'chan_mon_cfg test_utils::TestKeysInterface,
pub node: &'chan_man TestChannelManager<'node_cfg, 'chan_mon_cfg>,
+ pub onion_messenger: TestOnionMessenger<'chan_man, 'node_cfg, 'chan_mon_cfg>,
pub network_graph: &'node_cfg NetworkGraph<&'chan_mon_cfg test_utils::TestLogger>,
pub gossip_sync: P2PGossipSync<&'node_cfg NetworkGraph<&'chan_mon_cfg test_utils::TestLogger>, &'chan_mon_cfg test_utils::TestChainSource, &'chan_mon_cfg test_utils::TestLogger>,
pub node_seed: [u8; 32],
&'chan_mon_cfg test_utils::TestLogger,
>,
}
+
+impl<'a, 'b, 'c> Node<'a, 'b, 'c> {
+ pub fn init_features(&self, peer_node_id: &PublicKey) -> InitFeatures {
+ self.override_init_features.borrow().clone()
+ .unwrap_or_else(|| self.node.init_features() | self.onion_messenger.provided_init_features(peer_node_id))
+ }
+}
+
#[cfg(feature = "std")]
impl<'a, 'b, 'c> std::panic::UnwindSafe for Node<'a, 'b, 'c> {}
#[cfg(feature = "std")]
/// `release_commitment_secret` are affected by this setting.
#[cfg(test)]
pub fn set_channel_signer_available(&self, peer_id: &PublicKey, chan_id: &ChannelId, available: bool) {
+ use crate::sign::ChannelSigner;
+ log_debug!(self.logger, "Setting channel signer for {} as available={}", chan_id, available);
+
let per_peer_state = self.node.per_peer_state.read().unwrap();
let chan_lock = per_peer_state.get(peer_id).unwrap().lock().unwrap();
- let signer = (|| {
- match chan_lock.channel_by_id.get(chan_id) {
- Some(phase) => phase.context().get_signer(),
- None => panic!("Couldn't find a channel with id {}", chan_id),
+
+ let mut channel_keys_id = None;
+ if let Some(chan) = chan_lock.channel_by_id.get(chan_id).map(|phase| phase.context()) {
+ chan.get_signer().as_ecdsa().unwrap().set_available(available);
+ channel_keys_id = Some(chan.channel_keys_id);
+ }
+
+ let mut monitor = None;
+ for (funding_txo, channel_id) in self.chain_monitor.chain_monitor.list_monitors() {
+ if *chan_id == channel_id {
+ monitor = self.chain_monitor.chain_monitor.get_monitor(funding_txo).ok();
}
- })();
- log_debug!(self.logger, "Setting channel signer for {} as available={}", chan_id, available);
- signer.as_ecdsa().unwrap().set_available(available);
+ }
+ if let Some(monitor) = monitor {
+ monitor.do_signer_call(|signer| {
+ channel_keys_id = channel_keys_id.or(Some(signer.inner.channel_keys_id()));
+ signer.set_available(available)
+ });
+ }
+
+ if available {
+ self.keys_manager.unavailable_signers.lock().unwrap()
+ .remove(channel_keys_id.as_ref().unwrap());
+ } else {
+ self.keys_manager.unavailable_signers.lock().unwrap()
+ .insert(channel_keys_id.unwrap());
+ }
}
}
let feeest = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) };
let mut deserialized_monitors = Vec::new();
{
- for outpoint in self.chain_monitor.chain_monitor.list_monitors() {
+ for (outpoint, _channel_id) in self.chain_monitor.chain_monitor.list_monitors() {
let mut w = test_utils::TestVecWriter(Vec::new());
self.chain_monitor.chain_monitor.get_monitor(outpoint).unwrap().write(&mut w).unwrap();
let (_, deserialized_monitor) = <(BlockHash, ChannelMonitor<TestChannelSigner>)>::read(
// Before using all the new monitors to check the watch outpoints, use the full set of
// them to ensure we can write and reload our ChannelManager.
{
- let mut channel_monitors = HashMap::new();
+ let mut channel_monitors = new_hash_map();
for monitor in deserialized_monitors.iter_mut() {
channel_monitors.insert(monitor.get_funding_txo().0, monitor);
}
node_signer: self.keys_manager,
signer_provider: self.keys_manager,
fee_estimator: &test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) },
- router: &test_utils::TestRouter::new(Arc::new(network_graph), &scorer),
+ router: &test_utils::TestRouter::new(Arc::new(network_graph), &self.logger, &scorer),
chain_monitor: self.chain_monitor,
tx_broadcaster: &broadcaster,
logger: &self.logger,
let chain_source = test_utils::TestChainSource::new(Network::Testnet);
let chain_monitor = test_utils::TestChainMonitor::new(Some(&chain_source), &broadcaster, &self.logger, &feeest, &persister, &self.keys_manager);
for deserialized_monitor in deserialized_monitors.drain(..) {
- if chain_monitor.watch_channel(deserialized_monitor.get_funding_txo().0, deserialized_monitor) != Ok(ChannelMonitorUpdateStatus::Completed) {
+ let funding_outpoint = deserialized_monitor.get_funding_txo().0;
+ if chain_monitor.watch_channel(funding_outpoint, deserialized_monitor) != Ok(ChannelMonitorUpdateStatus::Completed) {
panic!();
}
}
let mut node_read = &chanman_encoded[..];
let (_, node_deserialized) = {
- let mut channel_monitors = HashMap::new();
+ let mut channel_monitors = new_hash_map();
for monitor in monitors_read.iter_mut() {
assert!(channel_monitors.insert(monitor.get_funding_txo().0, monitor).is_none());
}
assert!(node_read.is_empty());
for monitor in monitors_read.drain(..) {
- assert_eq!(node.chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor),
+ let funding_outpoint = monitor.get_funding_txo().0;
+ assert_eq!(node.chain_monitor.watch_channel(funding_outpoint, monitor),
Ok(ChannelMonitorUpdateStatus::Completed));
check_added_monitors!(node, 1);
}
$new_channelmanager = _reload_node(&$node, $new_config, &chanman_encoded, $monitors_encoded);
$node.node = &$new_channelmanager;
+ $node.onion_messenger.set_offers_handler(&$new_channelmanager);
};
($node: expr, $chanman_encoded: expr, $monitors_encoded: expr, $persister: ident, $new_chain_monitor: ident, $new_channelmanager: ident) => {
reload_node!($node, $crate::util::config::UserConfig::default(), $chanman_encoded, $monitors_encoded, $persister, $new_chain_monitor, $new_channelmanager);
};
let accept_channel = get_event_msg!(receiver, MessageSendEvent::SendAcceptChannel, initiator.node.get_our_node_id());
- assert_eq!(accept_channel.minimum_depth, 0);
+ assert_eq!(accept_channel.common_fields.minimum_depth, 0);
initiator.node.handle_accept_channel(&receiver.node.get_our_node_id(), &accept_channel);
let (temporary_channel_id, tx, _) = create_funding_transaction(&initiator, &receiver.node.get_our_node_id(), 100_000, 42);
pub fn exchange_open_accept_chan<'a, 'b, 'c>(node_a: &Node<'a, 'b, 'c>, node_b: &Node<'a, 'b, 'c>, channel_value: u64, push_msat: u64) -> ChannelId {
let create_chan_id = node_a.node.create_channel(node_b.node.get_our_node_id(), channel_value, push_msat, 42, None, None).unwrap();
let open_channel_msg = get_event_msg!(node_a, MessageSendEvent::SendOpenChannel, node_b.node.get_our_node_id());
- assert_eq!(open_channel_msg.temporary_channel_id, create_chan_id);
+ assert_eq!(open_channel_msg.common_fields.temporary_channel_id, create_chan_id);
assert_eq!(node_a.node.list_channels().iter().find(|channel| channel.channel_id == create_chan_id).unwrap().user_channel_id, 42);
node_b.node.handle_open_channel(&node_a.node.get_our_node_id(), &open_channel_msg);
if node_b.node.get_current_default_configuration().manually_accept_inbound_channels {
};
}
let accept_channel_msg = get_event_msg!(node_b, MessageSendEvent::SendAcceptChannel, node_a.node.get_our_node_id());
- assert_eq!(accept_channel_msg.temporary_channel_id, create_chan_id);
+ assert_eq!(accept_channel_msg.common_fields.temporary_channel_id, create_chan_id);
node_a.node.handle_accept_channel(&node_b.node.get_our_node_id(), &accept_channel_msg);
assert_ne!(node_b.node.list_channels().iter().find(|channel| channel.channel_id == create_chan_id).unwrap().user_channel_id, 0);
}}
}
+/// Checks if at least one peer is connected.
+fn is_any_peer_connected(node: &Node) -> bool {
+ let peer_state = node.node.per_peer_state.read().unwrap();
+ for (_, peer_mutex) in peer_state.iter() {
+ let peer = peer_mutex.lock().unwrap();
+ if peer.is_connected { return true; }
+ }
+ false
+}
+
/// Check that a channel's closing channel update has been broadcasted, and optionally
/// check whether an error message event has occurred.
pub fn check_closed_broadcast(node: &Node, num_channels: usize, with_error_msg: bool) -> Vec<msgs::ErrorMessage> {
+ let mut dummy_connected = false;
+ if !is_any_peer_connected(node) {
+ connect_dummy_node(&node);
+ dummy_connected = true;
+ }
let msg_events = node.node.get_and_clear_pending_msg_events();
assert_eq!(msg_events.len(), if with_error_msg { num_channels * 2 } else { num_channels });
+ if dummy_connected {
+ disconnect_dummy_node(&node);
+ }
msg_events.into_iter().filter_map(|msg_event| {
match msg_event {
MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
/// there are any [`Event::HTLCHandlingFailed`] events their [`HTLCDestination`] is included in the
/// `expected_failures` set.
pub fn expect_pending_htlcs_forwardable_conditions(events: Vec<Event>, expected_failures: &[HTLCDestination]) {
- match events[0] {
- Event::PendingHTLCsForwardable { .. } => { },
- _ => panic!("Unexpected event {:?}", events),
- };
-
let count = expected_failures.len() + 1;
assert_eq!(events.len(), count);
-
+ assert!(events.iter().find(|event| matches!(event, Event::PendingHTLCsForwardable { .. })).is_some());
if expected_failures.len() > 0 {
expect_htlc_handling_failed_destinations!(events, expected_failures)
}
assert_eq!(expected_recv_value, *amount_msat);
assert_eq!(expected_receiver_node_id, receiver_node_id.unwrap());
match purpose {
- PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
+ PaymentPurpose::Bolt11InvoicePayment { payment_preimage, payment_secret, .. } => {
+ assert_eq!(&expected_payment_preimage, payment_preimage);
+ assert_eq!(expected_payment_secret, *payment_secret);
+ },
+ PaymentPurpose::Bolt12OfferPayment { payment_preimage, payment_secret, .. } => {
+ assert_eq!(&expected_payment_preimage, payment_preimage);
+ assert_eq!(expected_payment_secret, *payment_secret);
+ },
+ PaymentPurpose::Bolt12RefundPayment { payment_preimage, payment_secret, .. } => {
assert_eq!(&expected_payment_preimage, payment_preimage);
assert_eq!(expected_payment_secret, *payment_secret);
},
}
}
+/// Returns the total fee earned by this HTLC forward, in msat.
pub fn expect_payment_forwarded<CM: AChannelManager, H: NodeHolder<CM=CM>>(
event: Event, node: &H, prev_node: &H, next_node: &H, expected_fee: Option<u64>,
- upstream_force_closed: bool, downstream_force_closed: bool
-) {
+ expected_extra_fees_msat: Option<u64>, upstream_force_closed: bool,
+ downstream_force_closed: bool, allow_1_msat_fee_overpay: bool,
+) -> Option<u64> {
match event {
Event::PaymentForwarded {
- fee_earned_msat, prev_channel_id, claim_from_onchain_tx, next_channel_id,
- outbound_amount_forwarded_msat: _
+ prev_channel_id, next_channel_id, prev_user_channel_id, next_user_channel_id,
+ total_fee_earned_msat, skimmed_fee_msat, claim_from_onchain_tx, ..
} => {
- assert_eq!(fee_earned_msat, expected_fee);
+ if allow_1_msat_fee_overpay {
+ // Aggregating fees for blinded paths may result in a rounding error, causing slight
+ // overpayment in fees.
+ let actual_fee = total_fee_earned_msat.unwrap();
+ let expected_fee = expected_fee.unwrap();
+ assert!(actual_fee == expected_fee || actual_fee == expected_fee + 1);
+ } else {
+ assert_eq!(total_fee_earned_msat, expected_fee);
+ }
+
+			// Check that the (knowingly) withheld amount exactly matches the expected
+			// extra (skimmed) fee.
+ assert!(skimmed_fee_msat == expected_extra_fees_msat);
if !upstream_force_closed {
// Is the event prev_channel_id in one of the channels between the two nodes?
- assert!(node.node().list_channels().iter().any(|x| x.counterparty.node_id == prev_node.node().get_our_node_id() && x.channel_id == prev_channel_id.unwrap()));
+ assert!(node.node().list_channels().iter().any(|x|
+ x.counterparty.node_id == prev_node.node().get_our_node_id() &&
+ x.channel_id == prev_channel_id.unwrap() &&
+ x.user_channel_id == prev_user_channel_id.unwrap()
+ ));
}
// We check for force closures since a force closed channel is removed from the
// node's channel list
if !downstream_force_closed {
- assert!(node.node().list_channels().iter().any(|x| x.counterparty.node_id == next_node.node().get_our_node_id() && x.channel_id == next_channel_id.unwrap()));
+				// As documented, `next_user_channel_id` will only be `Some` if we didn't settle via an
+				// onchain transaction, just like the `total_fee_earned_msat` field. Rather than
+				// introducing yet another variable, we use the latter's state as a flag to detect
+				// this and only check if it's `Some`.
+ if total_fee_earned_msat.is_none() {
+ assert!(node.node().list_channels().iter().any(|x|
+ x.counterparty.node_id == next_node.node().get_our_node_id() &&
+ x.channel_id == next_channel_id.unwrap()
+ ));
+ } else {
+ assert!(node.node().list_channels().iter().any(|x|
+ x.counterparty.node_id == next_node.node().get_our_node_id() &&
+ x.channel_id == next_channel_id.unwrap() &&
+ x.user_channel_id == next_user_channel_id.unwrap()
+ ));
+ }
}
assert_eq!(claim_from_onchain_tx, downstream_force_closed);
+ total_fee_earned_msat
},
_ => panic!("Unexpected event"),
}
}
+#[macro_export]
macro_rules! expect_payment_forwarded {
($node: expr, $prev_node: expr, $next_node: expr, $expected_fee: expr, $upstream_force_closed: expr, $downstream_force_closed: expr) => {
let mut events = $node.node.get_and_clear_pending_events();
assert_eq!(events.len(), 1);
$crate::ln::functional_test_utils::expect_payment_forwarded(
- events.pop().unwrap(), &$node, &$prev_node, &$next_node, $expected_fee,
- $upstream_force_closed, $downstream_force_closed);
+ events.pop().unwrap(), &$node, &$prev_node, &$next_node, $expected_fee, None,
+ $upstream_force_closed, $downstream_force_closed, false
+ );
}
}
}
}
-pub fn do_pass_along_path<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expected_path: &[&Node<'a, 'b, 'c>], recv_value: u64, our_payment_hash: PaymentHash, our_payment_secret: Option<PaymentSecret>, ev: MessageSendEvent, payment_claimable_expected: bool, clear_recipient_events: bool, expected_preimage: Option<PaymentPreimage>, is_probe: bool) -> Option<Event> {
+pub struct PassAlongPathArgs<'a, 'b, 'c, 'd> {
+ pub origin_node: &'a Node<'b, 'c, 'd>,
+ pub expected_path: &'a [&'a Node<'b, 'c, 'd>],
+ pub recv_value: u64,
+ pub payment_hash: PaymentHash,
+ pub payment_secret: Option<PaymentSecret>,
+ pub event: MessageSendEvent,
+ pub payment_claimable_expected: bool,
+ pub clear_recipient_events: bool,
+ pub expected_preimage: Option<PaymentPreimage>,
+ pub is_probe: bool,
+ pub custom_tlvs: Vec<(u64, Vec<u8>)>,
+}
+
+impl<'a, 'b, 'c, 'd> PassAlongPathArgs<'a, 'b, 'c, 'd> {
+ pub fn new(
+ origin_node: &'a Node<'b, 'c, 'd>, expected_path: &'a [&'a Node<'b, 'c, 'd>], recv_value: u64,
+ payment_hash: PaymentHash, event: MessageSendEvent,
+ ) -> Self {
+ Self {
+ origin_node, expected_path, recv_value, payment_hash, payment_secret: None, event,
+ payment_claimable_expected: true, clear_recipient_events: true, expected_preimage: None,
+ is_probe: false, custom_tlvs: Vec::new(),
+ }
+ }
+ pub fn without_clearing_recipient_events(mut self) -> Self {
+ self.clear_recipient_events = false;
+ self
+ }
+ pub fn is_probe(mut self) -> Self {
+ self.payment_claimable_expected = false;
+ self.is_probe = true;
+ self
+ }
+ pub fn without_claimable_event(mut self) -> Self {
+ self.payment_claimable_expected = false;
+ self
+ }
+ pub fn with_payment_secret(mut self, payment_secret: PaymentSecret) -> Self {
+ self.payment_secret = Some(payment_secret);
+ self
+ }
+ pub fn with_payment_preimage(mut self, payment_preimage: PaymentPreimage) -> Self {
+ self.expected_preimage = Some(payment_preimage);
+ self
+ }
+ pub fn with_custom_tlvs(mut self, custom_tlvs: Vec<(u64, Vec<u8>)>) -> Self {
+ self.custom_tlvs = custom_tlvs;
+ self
+ }
+}
+
+pub fn do_pass_along_path<'a, 'b, 'c>(args: PassAlongPathArgs) -> Option<Event> {
+ let PassAlongPathArgs {
+ origin_node, expected_path, recv_value, payment_hash: our_payment_hash,
+ payment_secret: our_payment_secret, event: ev, payment_claimable_expected,
+ clear_recipient_events, expected_preimage, is_probe, custom_tlvs
+ } = args;
+
let mut payment_event = SendEvent::from_event(ev);
let mut prev_node = origin_node;
let mut event = None;
assert_eq!(our_payment_hash, *payment_hash);
assert_eq!(node.node.get_our_node_id(), receiver_node_id.unwrap());
assert!(onion_fields.is_some());
+ assert_eq!(onion_fields.as_ref().unwrap().custom_tlvs, custom_tlvs);
match &purpose {
- PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
+ PaymentPurpose::Bolt11InvoicePayment { payment_preimage, payment_secret, .. } => {
+ assert_eq!(expected_preimage, *payment_preimage);
+ assert_eq!(our_payment_secret.unwrap(), *payment_secret);
+ assert_eq!(Some(*payment_secret), onion_fields.as_ref().unwrap().payment_secret);
+ },
+ PaymentPurpose::Bolt12OfferPayment { payment_preimage, payment_secret, .. } => {
+ assert_eq!(expected_preimage, *payment_preimage);
+ assert_eq!(our_payment_secret.unwrap(), *payment_secret);
+ assert_eq!(Some(*payment_secret), onion_fields.as_ref().unwrap().payment_secret);
+ },
+ PaymentPurpose::Bolt12RefundPayment { payment_preimage, payment_secret, .. } => {
assert_eq!(expected_preimage, *payment_preimage);
assert_eq!(our_payment_secret.unwrap(), *payment_secret);
assert_eq!(Some(*payment_secret), onion_fields.as_ref().unwrap().payment_secret);
}
pub fn pass_along_path<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expected_path: &[&Node<'a, 'b, 'c>], recv_value: u64, our_payment_hash: PaymentHash, our_payment_secret: Option<PaymentSecret>, ev: MessageSendEvent, payment_claimable_expected: bool, expected_preimage: Option<PaymentPreimage>) -> Option<Event> {
- do_pass_along_path(origin_node, expected_path, recv_value, our_payment_hash, our_payment_secret, ev, payment_claimable_expected, true, expected_preimage, false)
+ let mut args = PassAlongPathArgs::new(origin_node, expected_path, recv_value, our_payment_hash, ev);
+ if !payment_claimable_expected {
+ args = args.without_claimable_event();
+ }
+ if let Some(payment_secret) = our_payment_secret {
+ args = args.with_payment_secret(payment_secret);
+ }
+ if let Some(payment_preimage) = expected_preimage {
+ args = args.with_payment_preimage(payment_preimage);
+ }
+ do_pass_along_path(args)
}
pub fn send_probe_along_route<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expected_route: &[&[&Node<'a, 'b, 'c>]]) {
for path in expected_route.iter() {
let ev = remove_first_msg_event_to_node(&path[0].node.get_our_node_id(), &mut events);
- do_pass_along_path(origin_node, path, 0, PaymentHash([0_u8; 32]), None, ev, false, false, None, true);
+ do_pass_along_path(PassAlongPathArgs::new(origin_node, path, 0, PaymentHash([0_u8; 32]), ev)
+ .is_probe()
+ .without_clearing_recipient_events());
+
let nodes_to_fail_payment: Vec<_> = vec![origin_node].into_iter().chain(path.iter().cloned()).collect();
fail_payment_along_path(nodes_to_fail_payment.as_slice());
origin_node: &Node<'a, 'b, 'c>, expected_paths: &[&[&Node<'a, 'b, 'c>]], skip_last: bool,
our_payment_preimage: PaymentPreimage
) -> u64 {
- let extra_fees = vec![0; expected_paths.len()];
- do_claim_payment_along_route_with_extra_penultimate_hop_fees(origin_node, expected_paths,
- &extra_fees[..], skip_last, our_payment_preimage)
-}
-
-pub fn do_claim_payment_along_route_with_extra_penultimate_hop_fees<'a, 'b, 'c>(
- origin_node: &Node<'a, 'b, 'c>, expected_paths: &[&[&Node<'a, 'b, 'c>]], expected_extra_fees:
- &[u32], skip_last: bool, our_payment_preimage: PaymentPreimage
-) -> u64 {
- assert_eq!(expected_paths.len(), expected_extra_fees.len());
for path in expected_paths.iter() {
assert_eq!(path.last().unwrap().node.get_our_node_id(), expected_paths[0].last().unwrap().node.get_our_node_id());
}
expected_paths[0].last().unwrap().node.claim_funds(our_payment_preimage);
- pass_claimed_payment_along_route(origin_node, expected_paths, expected_extra_fees, skip_last, our_payment_preimage)
+ pass_claimed_payment_along_route(
+ ClaimAlongRouteArgs::new(origin_node, expected_paths, our_payment_preimage)
+ .skip_last(skip_last)
+ )
}
-pub fn pass_claimed_payment_along_route<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expected_paths: &[&[&Node<'a, 'b, 'c>]], expected_extra_fees: &[u32], skip_last: bool, our_payment_preimage: PaymentPreimage) -> u64 {
+pub struct ClaimAlongRouteArgs<'a, 'b, 'c, 'd> {
+	/// The node the payment originated from.
+	pub origin_node: &'a Node<'b, 'c, 'd>,
+	/// The paths the payment was sent over, all ending at the same claiming node.
+	pub expected_paths: &'a [&'a [&'a Node<'b, 'c, 'd>]],
+	/// Per-path extra fee expected to have been taken while forwarding (applied at hop
+	/// index 1 when checking `PaymentForwarded` events).
+	pub expected_extra_fees: Vec<u32>,
+	/// Per-path extra amount expected to have been paid to meet an HTLC minimum (also
+	/// applied at hop index 1 when checking `PaymentForwarded` events).
+	pub expected_min_htlc_overpay: Vec<u32>,
+	/// Whether to skip the last hop when passing the claim back along the route.
+	pub skip_last: bool,
+	/// The preimage used to claim the payment.
+	pub payment_preimage: PaymentPreimage,
+	// Allow forwarding nodes to have taken 1 msat more fee than expected based on the downstream
+	// fulfill amount.
+	//
+	// Necessary because our test utils calculate the expected fee for an intermediate node based on
+	// the amount that was claimed in their downstream peer's fulfill, but blinded intermediate nodes
+	// calculate their fee based on the inbound amount from their upstream peer, causing a difference
+	// in rounding.
+	pub allow_1_msat_fee_overpay: bool,
+}
+
+impl<'a, 'b, 'c, 'd> ClaimAlongRouteArgs<'a, 'b, 'c, 'd> {
+	/// Builds args with defaults: no extra fees or min-HTLC overpay expected on any path,
+	/// the last hop is not skipped, and 1-msat fee overpayment is not allowed.
+	pub fn new(
+		origin_node: &'a Node<'b, 'c, 'd>, expected_paths: &'a [&'a [&'a Node<'b, 'c, 'd>]],
+		payment_preimage: PaymentPreimage,
+	) -> Self {
+		Self {
+			origin_node, expected_paths, expected_extra_fees: vec![0; expected_paths.len()],
+			expected_min_htlc_overpay: vec![0; expected_paths.len()], skip_last: false, payment_preimage,
+			allow_1_msat_fee_overpay: false,
+		}
+	}
+	/// Sets whether to skip the last hop when passing the claim back along the route.
+	pub fn skip_last(mut self, skip_last: bool) -> Self {
+		self.skip_last = skip_last;
+		self
+	}
+	/// Sets the per-path extra fees expected to have been taken while forwarding.
+	pub fn with_expected_extra_fees(mut self, extra_fees: Vec<u32>) -> Self {
+		self.expected_extra_fees = extra_fees;
+		self
+	}
+	/// Sets the per-path extra amounts expected to have been paid to meet HTLC minimums.
+	pub fn with_expected_min_htlc_overpay(mut self, extra_fees: Vec<u32>) -> Self {
+		self.expected_min_htlc_overpay = extra_fees;
+		self
+	}
+	/// Allows forwarding nodes to have taken 1 msat more fee than expected (see the
+	/// `allow_1_msat_fee_overpay` field for why this is needed with blinded paths).
+	pub fn allow_1_msat_fee_overpay(mut self) -> Self {
+		self.allow_1_msat_fee_overpay = true;
+		self
+	}
+}
+
+pub fn pass_claimed_payment_along_route<'a, 'b, 'c, 'd>(args: ClaimAlongRouteArgs) -> u64 {
+ let ClaimAlongRouteArgs {
+ origin_node, expected_paths, expected_extra_fees, expected_min_htlc_overpay, skip_last,
+ payment_preimage: our_payment_preimage, allow_1_msat_fee_overpay,
+ } = args;
let claim_event = expected_paths[0].last().unwrap().node.get_and_clear_pending_events();
assert_eq!(claim_event.len(), 1);
+ #[allow(unused)]
+ let mut fwd_amt_msat = 0;
match claim_event[0] {
Event::PaymentClaimed {
- purpose: PaymentPurpose::SpontaneousPayment(preimage),
+ purpose: PaymentPurpose::SpontaneousPayment(preimage)
+ | PaymentPurpose::Bolt11InvoicePayment { payment_preimage: Some(preimage), .. }
+ | PaymentPurpose::Bolt12OfferPayment { payment_preimage: Some(preimage), .. }
+ | PaymentPurpose::Bolt12RefundPayment { payment_preimage: Some(preimage), .. },
amount_msat,
ref htlcs,
- .. }
- | Event::PaymentClaimed {
- purpose: PaymentPurpose::InvoicePayment { payment_preimage: Some(preimage), ..},
- ref htlcs,
- amount_msat,
..
} => {
assert_eq!(preimage, our_payment_preimage);
assert_eq!(htlcs.len(), expected_paths.len()); // One per path.
assert_eq!(htlcs.iter().map(|h| h.value_msat).sum::<u64>(), amount_msat);
expected_paths.iter().zip(htlcs).for_each(|(path, htlc)| check_claimed_htlc_channel(origin_node, path, htlc));
+ fwd_amt_msat = amount_msat;
},
Event::PaymentClaimed {
- purpose: PaymentPurpose::InvoicePayment { .. },
+ purpose: PaymentPurpose::Bolt11InvoicePayment { .. }
+ | PaymentPurpose::Bolt12OfferPayment { .. }
+ | PaymentPurpose::Bolt12RefundPayment { .. },
payment_hash,
amount_msat,
ref htlcs,
assert_eq!(htlcs.len(), expected_paths.len()); // One per path.
assert_eq!(htlcs.iter().map(|h| h.value_msat).sum::<u64>(), amount_msat);
expected_paths.iter().zip(htlcs).for_each(|(path, htlc)| check_claimed_htlc_channel(origin_node, path, htlc));
+ fwd_amt_msat = amount_msat;
}
_ => panic!(),
}
per_path_msgs.push(msgs_from_ev!(&events[0]));
} else {
for expected_path in expected_paths.iter() {
- // For MPP payments, we always want the message to the first node in the path.
- let ev = remove_first_msg_event_to_node(&expected_path[0].node.get_our_node_id(), &mut events);
+ // For MPP payments, we want the fulfill message from the payee to the penultimate hop in the
+ // path.
+ let penultimate_hop_node_id = expected_path.iter().rev().skip(1).next()
+ .map(|n| n.node.get_our_node_id())
+ .unwrap_or(origin_node.node.get_our_node_id());
+ let ev = remove_first_msg_event_to_node(&penultimate_hop_node_id, &mut events);
per_path_msgs.push(msgs_from_ev!(&ev));
}
}
{
$node.node.handle_update_fulfill_htlc(&$prev_node.node.get_our_node_id(), &next_msgs.as_ref().unwrap().0);
let mut fee = {
- let per_peer_state = $node.node.per_peer_state.read().unwrap();
- let peer_state = per_peer_state.get(&$prev_node.node.get_our_node_id())
- .unwrap().lock().unwrap();
- let channel = peer_state.channel_by_id.get(&next_msgs.as_ref().unwrap().0.channel_id).unwrap();
- if let Some(prev_config) = channel.context().prev_config() {
- prev_config.forwarding_fee_base_msat
- } else {
- channel.context().config().forwarding_fee_base_msat
- }
+ let (base_fee, prop_fee) = {
+ let per_peer_state = $node.node.per_peer_state.read().unwrap();
+ let peer_state = per_peer_state.get(&$prev_node.node.get_our_node_id())
+ .unwrap().lock().unwrap();
+ let channel = peer_state.channel_by_id.get(&next_msgs.as_ref().unwrap().0.channel_id).unwrap();
+ if let Some(prev_config) = channel.context().prev_config() {
+ (prev_config.forwarding_fee_base_msat as u64,
+ prev_config.forwarding_fee_proportional_millionths as u64)
+ } else {
+ (channel.context().config().forwarding_fee_base_msat as u64,
+ channel.context().config().forwarding_fee_proportional_millionths as u64)
+ }
+ };
+ ((fwd_amt_msat * prop_fee / 1_000_000) + base_fee) as u32
};
- if $idx == 1 { fee += expected_extra_fees[i]; }
- expect_payment_forwarded!(*$node, $next_node, $prev_node, Some(fee as u64), false, false);
- expected_total_fee_msat += fee as u64;
+
+ let mut expected_extra_fee = None;
+ if $idx == 1 {
+ fee += expected_extra_fees[i];
+ fee += expected_min_htlc_overpay[i];
+ expected_extra_fee = if expected_extra_fees[i] > 0 { Some(expected_extra_fees[i] as u64) } else { None };
+ }
+ let mut events = $node.node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 1);
+ let actual_fee = expect_payment_forwarded(events.pop().unwrap(), *$node, $next_node, $prev_node,
+ Some(fee as u64), expected_extra_fee, false, false, allow_1_msat_fee_overpay);
+ expected_total_fee_msat += actual_fee.unwrap();
+ fwd_amt_msat += actual_fee.unwrap();
check_added_monitors!($node, 1);
let new_next_msgs = if $new_msgs {
let events = $node.node.get_and_clear_pending_msg_events();
logger: &chanmon_cfgs[i].logger,
tx_broadcaster: &chanmon_cfgs[i].tx_broadcaster,
fee_estimator: &chanmon_cfgs[i].fee_estimator,
- router: test_utils::TestRouter::new(network_graph.clone(), &chanmon_cfgs[i].scorer),
+ router: test_utils::TestRouter::new(network_graph.clone(), &chanmon_cfgs[i].logger, &chanmon_cfgs[i].scorer),
+ message_router: test_utils::TestMessageRouter::new(network_graph.clone(), &chanmon_cfgs[i].keys_manager),
chain_monitor,
keys_manager: &chanmon_cfgs[i].keys_manager,
node_seed: seed,
let connect_style = Rc::new(RefCell::new(ConnectStyle::random_style()));
for i in 0..node_count {
+ let dedicated_entropy = DedicatedEntropy(RandomBytes::new([i as u8; 32]));
+ let onion_messenger = OnionMessenger::new(
+ dedicated_entropy, cfgs[i].keys_manager, cfgs[i].logger, &chan_mgrs[i],
+ &cfgs[i].message_router, &chan_mgrs[i], IgnoringMessageHandler {},
+ );
let gossip_sync = P2PGossipSync::new(cfgs[i].network_graph.as_ref(), None, cfgs[i].logger);
let wallet_source = Arc::new(test_utils::TestWalletSource::new(SecretKey::from_slice(&[i as u8 + 1; 32]).unwrap()));
nodes.push(Node{
fee_estimator: cfgs[i].fee_estimator, router: &cfgs[i].router,
chain_monitor: &cfgs[i].chain_monitor, keys_manager: &cfgs[i].keys_manager,
node: &chan_mgrs[i], network_graph: cfgs[i].network_graph.as_ref(), gossip_sync,
- node_seed: cfgs[i].node_seed, network_chan_count: chan_count.clone(),
+ node_seed: cfgs[i].node_seed, onion_messenger, network_chan_count: chan_count.clone(),
network_payment_count: payment_count.clone(), logger: cfgs[i].logger,
blocks: Arc::clone(&cfgs[i].tx_broadcaster.blocks),
connect_style: Rc::clone(&connect_style),
for i in 0..node_count {
for j in (i+1)..node_count {
- nodes[i].node.peer_connected(&nodes[j].node.get_our_node_id(), &msgs::Init {
- features: nodes[j].override_init_features.borrow().clone().unwrap_or_else(|| nodes[j].node.init_features()),
+ let node_id_i = nodes[i].node.get_our_node_id();
+ let node_id_j = nodes[j].node.get_our_node_id();
+
+ let init_i = msgs::Init {
+ features: nodes[i].init_features(&node_id_j),
networks: None,
remote_network_address: None,
- }, true).unwrap();
- nodes[j].node.peer_connected(&nodes[i].node.get_our_node_id(), &msgs::Init {
- features: nodes[i].override_init_features.borrow().clone().unwrap_or_else(|| nodes[i].node.init_features()),
+ };
+ let init_j = msgs::Init {
+ features: nodes[j].init_features(&node_id_i),
networks: None,
remote_network_address: None,
- }, false).unwrap();
+ };
+
+ nodes[i].node.peer_connected(&node_id_j, &init_j, true).unwrap();
+ nodes[j].node.peer_connected(&node_id_i, &init_i, false).unwrap();
+ nodes[i].onion_messenger.peer_connected(&node_id_j, &init_j, true).unwrap();
+ nodes[j].onion_messenger.peer_connected(&node_id_i, &init_i, false).unwrap();
}
}
nodes
}
+/// Connects a dummy peer with the fixed public key `[2; 33]` to `node`, at both the
+/// channel manager and the onion messenger, advertising only `static_remote_key` as
+/// required. Useful where a test needs at least one connected peer.
+pub fn connect_dummy_node<'a, 'b: 'a, 'c: 'b>(node: &Node<'a, 'b, 'c>) {
+	let node_id_dummy = PublicKey::from_slice(&[2; 33]).unwrap();
+
+	// Keep the dummy peer's feature set minimal: only `static_remote_key` required.
+	let mut dummy_init_features = InitFeatures::empty();
+	dummy_init_features.set_static_remote_key_required();
+
+	let init_dummy = msgs::Init {
+		features: dummy_init_features,
+		networks: None,
+		remote_network_address: None
+	};
+
+	node.node.peer_connected(&node_id_dummy, &init_dummy, true).unwrap();
+	node.onion_messenger.peer_connected(&node_id_dummy, &init_dummy, true).unwrap();
+}
+
+/// Disconnects the dummy peer added by `connect_dummy_node` (public key `[2; 33]`) from
+/// both the channel manager and the onion messenger.
+pub fn disconnect_dummy_node<'a, 'b: 'a, 'c: 'b>(node: &Node<'a, 'b, 'c>) {
+	let node_id_dummy = PublicKey::from_slice(&[2; 33]).unwrap();
+	node.node.peer_disconnected(&node_id_dummy);
+	node.onion_messenger.peer_disconnected(&node_id_dummy);
+}
+
// Note that the following only works for CLTV values up to 128
pub const ACCEPTED_HTLC_SCRIPT_WEIGHT: usize = 137; // Here we have a diff due to HTLC CLTV expiry being < 2^15 in test
pub const ACCEPTED_HTLC_SCRIPT_WEIGHT_ANCHORS: usize = 140; // Here we have a diff due to HTLC CLTV expiry being < 2^15 in test
/// also fail.
pub fn test_txn_broadcast<'a, 'b, 'c>(node: &Node<'a, 'b, 'c>, chan: &(msgs::ChannelUpdate, msgs::ChannelUpdate, ChannelId, Transaction), commitment_tx: Option<Transaction>, has_htlc_tx: HTLCType) -> Vec<Transaction> {
let mut node_txn = node.tx_broadcaster.txn_broadcasted.lock().unwrap();
- let mut txn_seen = HashSet::new();
+ let mut txn_seen = new_hash_set();
node_txn.retain(|tx| txn_seen.insert(tx.txid()));
assert!(node_txn.len() >= if commitment_tx.is_some() { 0 } else { 1 } + if has_htlc_tx == HTLCType::NONE { 0 } else { 1 });
pub fn check_preimage_claim<'a, 'b, 'c>(node: &Node<'a, 'b, 'c>, prev_txn: &Vec<Transaction>) -> Vec<Transaction> {
let mut node_txn = node.tx_broadcaster.txn_broadcasted.lock().unwrap();
- let mut txn_seen = HashSet::new();
+ let mut txn_seen = new_hash_set();
node_txn.retain(|tx| txn_seen.insert(tx.txid()));
let mut found_prev = false;
}
pub fn handle_announce_close_broadcast_events<'a, 'b, 'c>(nodes: &Vec<Node<'a, 'b, 'c>>, a: usize, b: usize, needs_err_handle: bool, expected_error: &str) {
+ let mut dummy_connected = false;
+ if !is_any_peer_connected(&nodes[a]) {
+ connect_dummy_node(&nodes[a]);
+ dummy_connected = true
+ }
+
let events_1 = nodes[a].node.get_and_clear_pending_msg_events();
assert_eq!(events_1.len(), 2);
- let as_update = match events_1[0] {
+ let as_update = match events_1[1] {
MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
msg.clone()
},
_ => panic!("Unexpected event"),
};
- match events_1[1] {
+ match events_1[0] {
MessageSendEvent::HandleError { node_id, action: msgs::ErrorAction::SendErrorMessage { ref msg } } => {
assert_eq!(node_id, nodes[b].node.get_our_node_id());
assert_eq!(msg.data, expected_error);
},
_ => panic!("Unexpected event"),
}
-
+ if dummy_connected {
+ disconnect_dummy_node(&nodes[a]);
+ dummy_connected = false;
+ }
+ if !is_any_peer_connected(&nodes[b]) {
+ connect_dummy_node(&nodes[b]);
+ dummy_connected = true;
+ }
let events_2 = nodes[b].node.get_and_clear_pending_msg_events();
assert_eq!(events_2.len(), if needs_err_handle { 1 } else { 2 });
- let bs_update = match events_2[0] {
+ let bs_update = match events_2.last().unwrap() {
MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
msg.clone()
},
_ => panic!("Unexpected event"),
};
if !needs_err_handle {
- match events_2[1] {
+ match events_2[0] {
MessageSendEvent::HandleError { node_id, action: msgs::ErrorAction::SendErrorMessage { ref msg } } => {
assert_eq!(node_id, nodes[a].node.get_our_node_id());
assert_eq!(msg.data, expected_error);
_ => panic!("Unexpected event"),
}
}
-
+ if dummy_connected {
+ disconnect_dummy_node(&nodes[b]);
+ }
for node in nodes {
node.gossip_sync.handle_channel_update(&as_update).unwrap();
node.gossip_sync.handle_channel_update(&bs_update).unwrap();
macro_rules! get_chan_reestablish_msgs {
($src_node: expr, $dst_node: expr) => {
{
- let mut announcements = $crate::prelude::HashSet::new();
+ let mut announcements = $crate::prelude::new_hash_set();
let mut res = Vec::with_capacity(1);
for msg in $src_node.node.get_and_clear_pending_msg_events() {
if let MessageSendEvent::SendChannelReestablish { ref node_id, ref msg } = msg {
use crate::chain::channelmonitor;
use crate::chain::channelmonitor::{CLOSED_CHANNEL_UPDATE_ID, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY};
use crate::chain::transaction::OutPoint;
-use crate::sign::{ecdsa::EcdsaChannelSigner, EntropySource, SignerProvider};
+use crate::sign::{ecdsa::EcdsaChannelSigner, EntropySource, OutputSpender, SignerProvider};
use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider, PathFailure, PaymentPurpose, ClosureReason, HTLCDestination, PaymentFailureReason};
use crate::ln::{ChannelId, PaymentPreimage, PaymentSecret, PaymentHash};
use crate::ln::channel::{commitment_tx_base_weight, COMMITMENT_TX_WEIGHT_PER_HTLC, CONCURRENT_INBOUND_HTLC_FEE_BUFFER, FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE, MIN_AFFORDABLE_HTLC_COUNT, get_holder_selected_channel_reserve_satoshis, OutboundV1Channel, InboundV1Channel, COINBASE_MATURITY, ChannelPhase};
use bitcoin::secp256k1::Secp256k1;
use bitcoin::secp256k1::{PublicKey,SecretKey};
-use regex;
-
use crate::io;
use crate::prelude::*;
use alloc::collections::BTreeSet;
-use core::default::Default;
use core::iter::repeat;
use bitcoin::hashes::Hash;
use crate::sync::{Arc, Mutex, RwLock};
use crate::ln::channelmanager::MAX_LOCAL_BREAKDOWN_TIMEOUT;
// Test all mutations that would make the channel open message insane
- insane_open_helper(format!("Per our config, funding must be at most {}. It was {}", TOTAL_BITCOIN_SUPPLY_SATOSHIS + 1, TOTAL_BITCOIN_SUPPLY_SATOSHIS + 2).as_str(), |mut msg| { msg.funding_satoshis = TOTAL_BITCOIN_SUPPLY_SATOSHIS + 2; msg });
- insane_open_helper(format!("Funding must be smaller than the total bitcoin supply. It was {}", TOTAL_BITCOIN_SUPPLY_SATOSHIS).as_str(), |mut msg| { msg.funding_satoshis = TOTAL_BITCOIN_SUPPLY_SATOSHIS; msg });
+ insane_open_helper(format!("Per our config, funding must be at most {}. It was {}", TOTAL_BITCOIN_SUPPLY_SATOSHIS + 1, TOTAL_BITCOIN_SUPPLY_SATOSHIS + 2).as_str(), |mut msg| { msg.common_fields.funding_satoshis = TOTAL_BITCOIN_SUPPLY_SATOSHIS + 2; msg });
+ insane_open_helper(format!("Funding must be smaller than the total bitcoin supply. It was {}", TOTAL_BITCOIN_SUPPLY_SATOSHIS).as_str(), |mut msg| { msg.common_fields.funding_satoshis = TOTAL_BITCOIN_SUPPLY_SATOSHIS; msg });
- insane_open_helper("Bogus channel_reserve_satoshis", |mut msg| { msg.channel_reserve_satoshis = msg.funding_satoshis + 1; msg });
+ insane_open_helper("Bogus channel_reserve_satoshis", |mut msg| { msg.channel_reserve_satoshis = msg.common_fields.funding_satoshis + 1; msg });
- insane_open_helper(r"push_msat \d+ was larger than channel amount minus reserve \(\d+\)", |mut msg| { msg.push_msat = (msg.funding_satoshis - msg.channel_reserve_satoshis) * 1000 + 1; msg });
+ insane_open_helper(r"push_msat \d+ was larger than channel amount minus reserve \(\d+\)", |mut msg| { msg.push_msat = (msg.common_fields.funding_satoshis - msg.channel_reserve_satoshis) * 1000 + 1; msg });
- insane_open_helper("Peer never wants payout outputs?", |mut msg| { msg.dust_limit_satoshis = msg.funding_satoshis + 1 ; msg });
+ insane_open_helper("Peer never wants payout outputs?", |mut msg| { msg.common_fields.dust_limit_satoshis = msg.common_fields.funding_satoshis + 1 ; msg });
- insane_open_helper(r"Minimum htlc value \(\d+\) was larger than full channel value \(\d+\)", |mut msg| { msg.htlc_minimum_msat = (msg.funding_satoshis - msg.channel_reserve_satoshis) * 1000; msg });
+ insane_open_helper(r"Minimum htlc value \(\d+\) was larger than full channel value \(\d+\)", |mut msg| { msg.common_fields.htlc_minimum_msat = (msg.common_fields.funding_satoshis - msg.channel_reserve_satoshis) * 1000; msg });
- insane_open_helper("They wanted our payments to be delayed by a needlessly long period", |mut msg| { msg.to_self_delay = MAX_LOCAL_BREAKDOWN_TIMEOUT + 1; msg });
+ insane_open_helper("They wanted our payments to be delayed by a needlessly long period", |mut msg| { msg.common_fields.to_self_delay = MAX_LOCAL_BREAKDOWN_TIMEOUT + 1; msg });
- insane_open_helper("0 max_accepted_htlcs makes for a useless channel", |mut msg| { msg.max_accepted_htlcs = 0; msg });
+ insane_open_helper("0 max_accepted_htlcs makes for a useless channel", |mut msg| { msg.common_fields.max_accepted_htlcs = 0; msg });
- insane_open_helper("max_accepted_htlcs was 484. It must not be larger than 483", |mut msg| { msg.max_accepted_htlcs = 484; msg });
+ insane_open_helper("max_accepted_htlcs was 484. It must not be larger than 483", |mut msg| { msg.common_fields.max_accepted_htlcs = 484; msg });
}
#[test]
let mut open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
if !send_from_initiator {
open_channel_message.channel_reserve_satoshis = 0;
- open_channel_message.max_htlc_value_in_flight_msat = 100_000_000;
+ open_channel_message.common_fields.max_htlc_value_in_flight_msat = 100_000_000;
}
nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_message);
let mut accept_channel_message = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
if send_from_initiator {
accept_channel_message.channel_reserve_satoshis = 0;
- accept_channel_message.max_htlc_value_in_flight_msat = 100_000_000;
+ accept_channel_message.common_fields.max_htlc_value_in_flight_msat = 100_000_000;
}
nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel_message);
{
chan_context.holder_selected_channel_reserve_satoshis = 0;
chan_context.holder_max_htlc_value_in_flight_msat = 100_000_000;
},
- ChannelPhase::Funded(_) => assert!(false),
+ _ => assert!(false),
}
}
send_payment(&nodes[1], &vec!(&nodes[0])[..], 800000);
send_payment(&nodes[0], &vec!(&nodes[1])[..], 800000);
close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true);
- check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
- check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
}
#[test]
assert_eq!(get_feerate!(nodes[0], nodes[1], channel_id), feerate + 30);
assert_eq!(get_feerate!(nodes[1], nodes[0], channel_id), feerate + 30);
close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true);
- check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
- check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
}
#[test]
// Close down the channels...
close_channel(&nodes[0], &nodes[1], &chan_1.2, chan_1.3, true);
- check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
- check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, false);
- check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[2].node.get_our_node_id()], 100000);
- check_closed_event!(nodes[2], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[2].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[2], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
close_channel(&nodes[2], &nodes[3], &chan_3.2, chan_3.3, true);
- check_closed_event!(nodes[2], 1, ClosureReason::CooperativeClosure, [nodes[3].node.get_our_node_id()], 100000);
- check_closed_event!(nodes[3], 1, ClosureReason::CooperativeClosure, [nodes[2].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[2], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[3].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[3], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[2].node.get_our_node_id()], 100000);
close_channel(&nodes[1], &nodes[3], &chan_4.2, chan_4.3, false);
- check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[3].node.get_our_node_id()], 100000);
- check_closed_event!(nodes[3], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[3].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[3], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
}
#[test]
let secp_ctx = Secp256k1::new();
let session_priv = SecretKey::from_slice(&[42; 32]).expect("RNG is bad!");
- let cur_height = nodes[1].node.best_block.read().unwrap().height() + 1;
+ let cur_height = nodes[1].node.best_block.read().unwrap().height + 1;
let onion_keys = onion_utils::construct_onion_keys(&secp_ctx, &route.paths[0], &session_priv).unwrap();
let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(&route.paths[0],
// Need to manually create the update_add_htlc message to go around the channel reserve check in send_htlc()
let secp_ctx = Secp256k1::new();
let session_priv = SecretKey::from_slice(&[42; 32]).unwrap();
- let cur_height = nodes[1].node.best_block.read().unwrap().height() + 1;
+ let cur_height = nodes[1].node.best_block.read().unwrap().height + 1;
let onion_keys = onion_utils::construct_onion_keys(&secp_ctx, &route.paths[0], &session_priv).unwrap();
let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(&route.paths[0],
700_000, RecipientOnionFields::secret_only(payment_secret), cur_height, &None).unwrap();
// Need to manually create the update_add_htlc message to go around the channel reserve check in send_htlc()
let secp_ctx = Secp256k1::new();
let session_priv = SecretKey::from_slice(&[42; 32]).unwrap();
- let cur_height = nodes[0].node.best_block.read().unwrap().height() + 1;
+ let cur_height = nodes[0].node.best_block.read().unwrap().height + 1;
let onion_keys = onion_utils::construct_onion_keys(&secp_ctx, &route_2.paths[0], &session_priv).unwrap();
let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(
&route_2.paths[0], recv_value_2, RecipientOnionFields::spontaneous_empty(), cur_height, &None).unwrap();
assert_eq!(nodes[2].node.get_our_node_id(), receiver_node_id.unwrap());
assert_eq!(via_channel_id, Some(chan_2.2));
match &purpose {
- PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
+ PaymentPurpose::Bolt11InvoicePayment { payment_preimage, payment_secret, .. } => {
assert!(payment_preimage.is_none());
assert_eq!(our_payment_secret_21, *payment_secret);
},
- _ => panic!("expected PaymentPurpose::InvoicePayment")
+ _ => panic!("expected PaymentPurpose::Bolt11InvoicePayment")
}
},
_ => panic!("Unexpected event"),
assert_eq!(nodes[2].node.get_our_node_id(), receiver_node_id.unwrap());
assert_eq!(via_channel_id, Some(chan_2.2));
match &purpose {
- PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
+ PaymentPurpose::Bolt11InvoicePayment { payment_preimage, payment_secret, .. } => {
assert!(payment_preimage.is_none());
assert_eq!(our_payment_secret_22, *payment_secret);
},
- _ => panic!("expected PaymentPurpose::InvoicePayment")
+ _ => panic!("expected PaymentPurpose::Bolt11InvoicePayment")
}
},
_ => panic!("Unexpected event"),
connect_blocks(&nodes[3], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1);
let events = nodes[3].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 2);
- let close_chan_update_1 = match events[0] {
+ let close_chan_update_1 = match events[1] {
MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
msg.clone()
},
_ => panic!("Unexpected event"),
};
- match events[1] {
+ match events[0] {
MessageSendEvent::HandleError { action: ErrorAction::DisconnectPeer { .. }, node_id } => {
assert_eq!(node_id, nodes[4].node.get_our_node_id());
},
connect_blocks(&nodes[4], TEST_FINAL_CLTV - CLTV_CLAIM_BUFFER + 2);
let events = nodes[4].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 2);
- let close_chan_update_2 = match events[0] {
+ let close_chan_update_2 = match events[1] {
MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
msg.clone()
},
_ => panic!("Unexpected event"),
};
- match events[1] {
+ match events[0] {
MessageSendEvent::HandleError { action: ErrorAction::DisconnectPeer { .. }, node_id } => {
assert_eq!(node_id, nodes[3].node.get_our_node_id());
},
}
check_added_monitors!(nodes[4], 1);
test_txn_broadcast(&nodes[4], &chan_4, None, HTLCType::SUCCESS);
- check_closed_event!(nodes[4], 1, ClosureReason::HolderForceClosed, [nodes[3].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[4], 1, ClosureReason::HTLCsTimedOut, [nodes[3].node.get_our_node_id()], 100000);
mine_transaction(&nodes[4], &node_txn[0]);
check_preimage_claim(&nodes[4], &node_txn);
assert_eq!(nodes[3].chain_monitor.chain_monitor.watch_channel(OutPoint { txid: chan_3.3.txid(), index: 0 }, chan_3_mon),
Ok(ChannelMonitorUpdateStatus::Completed));
- check_closed_event!(nodes[3], 1, ClosureReason::HolderForceClosed, [nodes[4].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[3], 1, ClosureReason::HTLCsTimedOut, [nodes[4].node.get_our_node_id()], 100000);
}
#[test]
check_added_monitors!(nodes[1], 1);
check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
let mut events = nodes[0].node.get_and_clear_pending_events();
- expect_pending_htlcs_forwardable_from_events!(nodes[0], events[0..1], true);
+ expect_pending_htlcs_forwardable_conditions(events[0..2].to_vec(), &[HTLCDestination::FailedPayment { payment_hash: payment_hash_2 }]);
match events.last().unwrap() {
Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}
_ => panic!("Unexpected event"),
}
let chan_id = Some(chan_1.2);
match forwarded_events[1] {
- Event::PaymentForwarded { fee_earned_msat, prev_channel_id, claim_from_onchain_tx, next_channel_id, outbound_amount_forwarded_msat } => {
- assert_eq!(fee_earned_msat, Some(1000));
+ Event::PaymentForwarded { total_fee_earned_msat, prev_channel_id, claim_from_onchain_tx,
+ next_channel_id, outbound_amount_forwarded_msat, ..
+ } => {
+ assert_eq!(total_fee_earned_msat, Some(1000));
assert_eq!(prev_channel_id, chan_id);
assert_eq!(claim_from_onchain_tx, true);
assert_eq!(next_channel_id, Some(chan_2.2));
_ => panic!()
}
match forwarded_events[2] {
- Event::PaymentForwarded { fee_earned_msat, prev_channel_id, claim_from_onchain_tx, next_channel_id, outbound_amount_forwarded_msat } => {
- assert_eq!(fee_earned_msat, Some(1000));
+ Event::PaymentForwarded { total_fee_earned_msat, prev_channel_id, claim_from_onchain_tx,
+ next_channel_id, outbound_amount_forwarded_msat, ..
+ } => {
+ assert_eq!(total_fee_earned_msat, Some(1000));
assert_eq!(prev_channel_id, chan_id);
assert_eq!(claim_from_onchain_tx, true);
assert_eq!(next_channel_id, Some(chan_2.2));
let events = nodes[1].node.get_and_clear_pending_events();
assert_eq!(events.len(), 2);
match events[0] {
- Event::PendingHTLCsForwardable { .. } => { },
- _ => panic!("Unexpected event"),
- };
- match events[1] {
Event::HTLCHandlingFailed { .. } => { },
_ => panic!("Unexpected event"),
}
+ match events[1] {
+ Event::PendingHTLCsForwardable { .. } => { },
+ _ => panic!("Unexpected event"),
+ };
// Deliberately don't process the pending fail-back so they all fail back at once after
// block connection just like the !deliver_bs_raa case
}
- let mut failed_htlcs = HashSet::new();
+ let mut failed_htlcs = new_hash_set();
assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
mine_transaction(&nodes[1], &revoked_local_txn[0]);
let secp_ctx = Secp256k1::new();
let session_priv = SecretKey::from_slice(&[42; 32]).unwrap();
- let current_height = nodes[1].node.best_block.read().unwrap().height() + 1;
+ let current_height = nodes[1].node.best_block.read().unwrap().height + 1;
let (onion_payloads, _amount_msat, cltv_expiry) = onion_utils::build_onion_payloads(
&route.paths[0], 50_000, RecipientOnionFields::secret_only(payment_secret), current_height, &None).unwrap();
let onion_keys = onion_utils::construct_onion_keys(&secp_ctx, &route.paths[0], &session_priv).unwrap();
#[test]
fn test_peer_disconnected_before_funding_broadcasted() {
// Test that channels are closed with `ClosureReason::DisconnectedPeer` if the peer disconnects
- // before the funding transaction has been broadcasted.
+ // before the funding transaction has been broadcast and the peer doesn't reconnect in time.
let chanmon_cfgs = create_chanmon_cfgs(2);
let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 0);
}
- // Ensure that the channel is closed with `ClosureReason::DisconnectedPeer` when the peers are
- // disconnected before the funding transaction was broadcasted.
+ // The peers disconnect before the funding is broadcasted.
nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
- check_closed_event!(&nodes[0], 2, ClosureReason::DisconnectedPeer, true
+ // The time for peers to reconnect expires.
+ for _ in 0..UNFUNDED_CHANNEL_AGE_LIMIT_TICKS {
+ nodes[0].node.timer_tick_occurred();
+ }
+
+ // Ensure that the channel is closed with `ClosureReason::HolderForceClosed`
+ // when the peers are disconnected and do not reconnect before the funding
+ // transaction is broadcasted.
+ check_closed_event!(&nodes[0], 2, ClosureReason::HolderForceClosed, true
, [nodes[1].node.get_our_node_id()], 1000000);
check_closed_event!(&nodes[1], 1, ClosureReason::DisconnectedPeer, false
, [nodes[0].node.get_our_node_id()], 1000000);
assert_eq!(receiver_node_id.unwrap(), nodes[1].node.get_our_node_id());
assert_eq!(via_channel_id, Some(channel_id));
match &purpose {
- PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
+ PaymentPurpose::Bolt11InvoicePayment { payment_preimage, payment_secret, .. } => {
assert!(payment_preimage.is_none());
assert_eq!(payment_secret_1, *payment_secret);
},
- _ => panic!("expected PaymentPurpose::InvoicePayment")
+ _ => panic!("expected PaymentPurpose::Bolt11InvoicePayment")
}
},
_ => panic!("Unexpected event"),
Event::PaymentClaimable { ref payment_hash, ref purpose, .. } => {
assert_eq!(payment_hash_2, *payment_hash);
match &purpose {
- PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
+ PaymentPurpose::Bolt11InvoicePayment { payment_preimage, payment_secret, .. } => {
assert!(payment_preimage.is_none());
assert_eq!(payment_secret_2, *payment_secret);
},
- _ => panic!("expected PaymentPurpose::InvoicePayment")
+ _ => panic!("expected PaymentPurpose::Bolt11InvoicePayment")
}
},
_ => panic!("Unexpected event"),
MessageSendEvent::UpdateHTLCs { .. } => {},
_ => panic!("Unexpected event"),
}
- match events[1] {
+ match events[2] {
MessageSendEvent::BroadcastChannelUpdate { .. } => {},
_ => panic!("Unexepected event"),
}
mine_transaction(&nodes[1], &commitment_tx[0]);
check_added_monitors!(nodes[1], 1);
let events = nodes[1].node.get_and_clear_pending_msg_events();
- match events[0] {
+ match events[1] {
MessageSendEvent::BroadcastChannelUpdate { .. } => {},
_ => panic!("Unexpected event"),
}
_ => panic!("Unexpected event"),
}
match events[1] {
- Event::PaymentForwarded { fee_earned_msat, prev_channel_id, claim_from_onchain_tx, next_channel_id, outbound_amount_forwarded_msat } => {
- assert_eq!(fee_earned_msat, Some(1000));
+ Event::PaymentForwarded { total_fee_earned_msat, prev_channel_id, claim_from_onchain_tx,
+ next_channel_id, outbound_amount_forwarded_msat, ..
+ } => {
+ assert_eq!(total_fee_earned_msat, Some(1000));
assert_eq!(prev_channel_id, Some(chan_1.2));
assert_eq!(claim_from_onchain_tx, true);
assert_eq!(next_channel_id, Some(chan_2.2));
MessageSendEvent::UpdateHTLCs { .. } => {},
_ => panic!("Unexpected event"),
}
- match events[1] {
+ match events[2] {
MessageSendEvent::BroadcastChannelUpdate { .. } => {},
_ => panic!("Unexepected event"),
}
MessageSendEvent::UpdateHTLCs { .. } => {},
_ => panic!("Unexpected event"),
}
- match events[1] {
+ match events[2] {
MessageSendEvent::BroadcastChannelUpdate { .. } => {},
_ => panic!("Unexepected event"),
}
connect_blocks(&nodes[2], ANTI_REORG_DELAY - 1);
check_closed_broadcast!(nodes[2], true);
if deliver_last_raa {
- expect_pending_htlcs_forwardable_from_events!(nodes[2], events[0..1], true);
+ expect_pending_htlcs_forwardable_from_events!(nodes[2], events[1..2], true);
let expected_destinations: Vec<HTLCDestination> = repeat(HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }).take(3).collect();
expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), expected_destinations);
let as_events = nodes[0].node.get_and_clear_pending_events();
assert_eq!(as_events.len(), if announce_latest { 10 } else { 6 });
- let mut as_failds = HashSet::new();
+ let mut as_faileds = new_hash_set();
let mut as_updates = 0;
for event in as_events.iter() {
if let &Event::PaymentPathFailed { ref payment_hash, ref payment_failed_permanently, ref failure, .. } = event {
- assert!(as_failds.insert(*payment_hash));
+ assert!(as_faileds.insert(*payment_hash));
if *payment_hash != payment_hash_2 {
assert_eq!(*payment_failed_permanently, deliver_last_raa);
} else {
} else if let &Event::PaymentFailed { .. } = event {
} else { panic!("Unexpected event"); }
}
- assert!(as_failds.contains(&payment_hash_1));
- assert!(as_failds.contains(&payment_hash_2));
+ assert!(as_faileds.contains(&payment_hash_1));
+ assert!(as_faileds.contains(&payment_hash_2));
if announce_latest {
- assert!(as_failds.contains(&payment_hash_3));
- assert!(as_failds.contains(&payment_hash_5));
+ assert!(as_faileds.contains(&payment_hash_3));
+ assert!(as_faileds.contains(&payment_hash_5));
}
- assert!(as_failds.contains(&payment_hash_6));
+ assert!(as_faileds.contains(&payment_hash_6));
let bs_events = nodes[1].node.get_and_clear_pending_events();
assert_eq!(bs_events.len(), if announce_latest { 8 } else { 6 });
- let mut bs_failds = HashSet::new();
+ let mut bs_faileds = new_hash_set();
let mut bs_updates = 0;
for event in bs_events.iter() {
if let &Event::PaymentPathFailed { ref payment_hash, ref payment_failed_permanently, ref failure, .. } = event {
- assert!(bs_failds.insert(*payment_hash));
+ assert!(bs_faileds.insert(*payment_hash));
if *payment_hash != payment_hash_1 && *payment_hash != payment_hash_5 {
assert_eq!(*payment_failed_permanently, deliver_last_raa);
} else {
} else if let &Event::PaymentFailed { .. } = event {
} else { panic!("Unexpected event"); }
}
- assert!(bs_failds.contains(&payment_hash_1));
- assert!(bs_failds.contains(&payment_hash_2));
+ assert!(bs_faileds.contains(&payment_hash_1));
+ assert!(bs_faileds.contains(&payment_hash_2));
if announce_latest {
- assert!(bs_failds.contains(&payment_hash_4));
+ assert!(bs_faileds.contains(&payment_hash_4));
}
- assert!(bs_failds.contains(&payment_hash_5));
+ assert!(bs_faileds.contains(&payment_hash_5));
// For each HTLC which was not failed-back by normal process (ie deliver_last_raa), we should
// get a NetworkUpdate. A should have gotten 4 HTLCs which were failed-back due to
let chain_monitor = test_utils::TestChainMonitor::new(Some(&chanmon_cfgs[0].chain_source), &chanmon_cfgs[0].tx_broadcaster, &chanmon_cfgs[0].logger, &chanmon_cfgs[0].fee_estimator, &chanmon_cfgs[0].persister, &keys_manager);
let network_graph = Arc::new(NetworkGraph::new(Network::Testnet, &chanmon_cfgs[0].logger));
let scorer = RwLock::new(test_utils::TestScorer::new());
- let router = test_utils::TestRouter::new(network_graph.clone(), &scorer);
- let node = NodeCfg { chain_source: &chanmon_cfgs[0].chain_source, logger: &chanmon_cfgs[0].logger, tx_broadcaster: &chanmon_cfgs[0].tx_broadcaster, fee_estimator: &chanmon_cfgs[0].fee_estimator, router, chain_monitor, keys_manager: &keys_manager, network_graph, node_seed: seed, override_init_features: alloc::rc::Rc::new(core::cell::RefCell::new(None)) };
+ let router = test_utils::TestRouter::new(network_graph.clone(), &chanmon_cfgs[0].logger, &scorer);
+ let message_router = test_utils::TestMessageRouter::new(network_graph.clone(), &keys_manager);
+ let node = NodeCfg { chain_source: &chanmon_cfgs[0].chain_source, logger: &chanmon_cfgs[0].logger, tx_broadcaster: &chanmon_cfgs[0].tx_broadcaster, fee_estimator: &chanmon_cfgs[0].fee_estimator, router, message_router, chain_monitor, keys_manager: &keys_manager, network_graph, node_seed: seed, override_init_features: alloc::rc::Rc::new(core::cell::RefCell::new(None)) };
let mut node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
node_cfgs.remove(0);
node_cfgs.insert(0, node);
let closing_tx = close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true).2;
mine_transaction(&nodes[0], &closing_tx);
- check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
let spend_txn = check_spendable_outputs!(nodes[0], node_cfgs[0].keys_manager);
check_spends!(spend_txn[0], closing_tx);
mine_transaction(&nodes[1], &closing_tx);
- check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
test_txn_broadcast(&nodes[1], &chan, None, if use_dust { HTLCType::NONE } else { HTLCType::SUCCESS });
check_closed_broadcast!(nodes[1], true);
check_added_monitors!(nodes[1], 1);
- check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[1], 1, ClosureReason::HTLCsTimedOut, [nodes[0].node.get_our_node_id()], 100000);
}
fn do_htlc_claim_current_remote_commitment_only(use_dust: bool) {
test_txn_broadcast(&nodes[0], &chan, None, HTLCType::NONE);
check_closed_broadcast!(nodes[0], true);
check_added_monitors!(nodes[0], 1);
- check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[0], 1, ClosureReason::HTLCsTimedOut, [nodes[1].node.get_our_node_id()], 100000);
}
fn do_htlc_claim_previous_remote_commitment_only(use_dust: bool, check_revoke_no_close: bool) {
test_txn_broadcast(&nodes[0], &chan, None, HTLCType::NONE);
check_closed_broadcast!(nodes[0], true);
check_added_monitors!(nodes[0], 1);
- check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[0], 1, ClosureReason::HTLCsTimedOut, [nodes[1].node.get_our_node_id()], 100000);
} else {
expect_payment_failed!(nodes[0], our_payment_hash, true);
}
let push_msat=10001;
assert!(nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42, None, None).is_ok()); //Create a valid channel
let node0_to_1_send_open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
- assert!(node0_to_1_send_open_channel.channel_reserve_satoshis>=node0_to_1_send_open_channel.dust_limit_satoshis);
+ assert!(node0_to_1_send_open_channel.channel_reserve_satoshis>=node0_to_1_send_open_channel.common_fields.dust_limit_satoshis);
// BOLT #2 spec: Sending node must set undefined bits in channel_flags to 0
// Only the least-significant bit of channel_flags is currently defined resulting in channel_flags only having one of two possible states 0 or 1
- assert!(node0_to_1_send_open_channel.channel_flags<=1);
+ assert!(node0_to_1_send_open_channel.common_fields.channel_flags<=1);
// BOLT #2 spec: Sending node should set to_self_delay sufficient to ensure the sender can irreversibly spend a commitment transaction output, in case of misbehaviour by the receiver.
assert!(BREAKDOWN_TIMEOUT>0);
- assert!(node0_to_1_send_open_channel.to_self_delay==BREAKDOWN_TIMEOUT);
+ assert!(node0_to_1_send_open_channel.common_fields.to_self_delay==BREAKDOWN_TIMEOUT);
// BOLT #2 spec: Sending node must ensure the chain_hash value identifies the chain it wishes to open the channel within.
let chain_hash = ChainHash::using_genesis_block(Network::Testnet);
- assert_eq!(node0_to_1_send_open_channel.chain_hash, chain_hash);
+ assert_eq!(node0_to_1_send_open_channel.common_fields.chain_hash, chain_hash);
// BOLT #2 spec: Sending node must set funding_pubkey, revocation_basepoint, htlc_basepoint, payment_basepoint, and delayed_payment_basepoint to valid DER-encoded, compressed, secp256k1 pubkeys.
- assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.funding_pubkey.serialize()).is_ok());
- assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.revocation_basepoint.serialize()).is_ok());
- assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.htlc_basepoint.serialize()).is_ok());
- assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.payment_point.serialize()).is_ok());
- assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.delayed_payment_basepoint.serialize()).is_ok());
+ assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.common_fields.funding_pubkey.serialize()).is_ok());
+ assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.common_fields.revocation_basepoint.serialize()).is_ok());
+ assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.common_fields.htlc_basepoint.serialize()).is_ok());
+ assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.common_fields.payment_basepoint.serialize()).is_ok());
+ assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.common_fields.delayed_payment_basepoint.serialize()).is_ok());
}
#[test]
let push_msat=10001;
nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42, None, None).unwrap();
let mut node0_to_1_send_open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
- node0_to_1_send_open_channel.dust_limit_satoshis = 547;
+ node0_to_1_send_open_channel.common_fields.dust_limit_satoshis = 547;
node0_to_1_send_open_channel.channel_reserve_satoshis = 100001;
nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &node0_to_1_send_open_channel);
// nodes[1]'s ChannelManager will now signal that we have HTLC forwards to process.
let process_htlc_forwards_event = nodes[1].node.get_and_clear_pending_events();
assert_eq!(process_htlc_forwards_event.len(), 2);
- match &process_htlc_forwards_event[0] {
+ match &process_htlc_forwards_event[1] {
&Event::PendingHTLCsForwardable { .. } => {},
_ => panic!("Unexpected event"),
}
get_route_and_payment_hash!(nodes[0], nodes[1], 1000);
route.paths[0].hops[0].fee_msat = send_amt;
let session_priv = SecretKey::from_slice(&[42; 32]).unwrap();
- let cur_height = nodes[0].node.best_block.read().unwrap().height() + 1;
+ let cur_height = nodes[0].node.best_block.read().unwrap().height + 1;
let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::signing_only(), &route.paths[0], &session_priv).unwrap();
let (onion_payloads, _htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(
&route.paths[0], send_amt, RecipientOnionFields::secret_only(our_payment_secret), cur_height, &None).unwrap();
// We test config.our_to_self > BREAKDOWN_TIMEOUT is enforced in InboundV1Channel::new()
nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 1000000, 1000000, 42, None, None).unwrap();
let mut open_channel = get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id());
- open_channel.to_self_delay = 200;
+ open_channel.common_fields.to_self_delay = 200;
if let Err(error) = InboundV1Channel::new(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }),
&nodes[0].keys_manager, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), &nodes[0].node.channel_type_features(), &nodes[1].node.init_features(), &open_channel, 0,
&low_our_to_self_config, 0, &nodes[0].logger, /*is_0conf=*/false)
nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 1000000, 1000000, 42, None, None).unwrap();
nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()));
let mut accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
- accept_channel.to_self_delay = 200;
+ accept_channel.common_fields.to_self_delay = 200;
nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel);
let reason_msg;
if let MessageSendEvent::HandleError { ref action, .. } = nodes[0].node.get_and_clear_pending_msg_events()[0] {
// We test msg.to_self_delay <= config.their_to_self_delay is enforced in InboundV1Channel::new()
nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 1000000, 1000000, 42, None, None).unwrap();
let mut open_channel = get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id());
- open_channel.to_self_delay = 200;
+ open_channel.common_fields.to_self_delay = 200;
if let Err(error) = InboundV1Channel::new(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }),
&nodes[0].keys_manager, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), &nodes[0].node.channel_type_features(), &nodes[1].node.init_features(), &open_channel, 0,
&high_their_to_self_config, 0, &nodes[0].logger, /*is_0conf=*/false)
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+ // Connect a dummy node to ensure that future events are broadcast properly
+ connect_dummy_node(&nodes[0]);
+
create_announced_chan_between_nodes(&nodes, 0, 1);
create_announced_chan_between_nodes(&nodes, 1, 0);
create_announced_chan_between_nodes(&nodes, 0, 1);
}
let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
assert_eq!(msg_events.len(), 3);
- let mut chans_disabled = HashMap::new();
+ let mut chans_disabled = new_hash_map();
for e in msg_events {
match e {
MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
let route_params = RouteParameters::from_payment_params_and_value(payment_params, 3_000_000);
let route = get_route(&nodes[1].node.get_our_node_id(), &route_params, &nodes[1].network_graph.read_only(), None,
nodes[0].logger, &scorer, &Default::default(), &random_seed_bytes).unwrap();
- send_along_route(&nodes[1], route, &[&nodes[0]], 3_000_000);
+ let failed_payment_hash = send_along_route(&nodes[1], route, &[&nodes[0]], 3_000_000).1;
let revoked_local_txn = get_local_commitment_txn!(nodes[1], chan.2);
assert_eq!(revoked_local_txn[0].input.len(), 1);
let block_129 = create_dummy_block(block_11.block_hash(), 42, vec![revoked_htlc_txn[0].clone(), revoked_htlc_txn[1].clone()]);
connect_block(&nodes[0], &block_129);
let events = nodes[0].node.get_and_clear_pending_events();
- expect_pending_htlcs_forwardable_from_events!(nodes[0], events[0..1], true);
+ expect_pending_htlcs_forwardable_conditions(events[0..2].to_vec(), &[HTLCDestination::FailedPayment { payment_hash: failed_payment_hash }]);
match events.last().unwrap() {
Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}
_ => panic!("Unexpected event"),
// Assert the channel created by node0 is using the override config.
let res = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
- assert_eq!(res.channel_flags, 0);
- assert_eq!(res.to_self_delay, 200);
+ assert_eq!(res.common_fields.channel_flags, 0);
+ assert_eq!(res.common_fields.to_self_delay, 200);
}
#[test]
nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 16_000_000, 12_000_000, 42, None, Some(zero_config)).unwrap();
let res = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
- assert_eq!(res.htlc_minimum_msat, 1);
+ assert_eq!(res.common_fields.htlc_minimum_msat, 1);
nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &res);
let res = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
- assert_eq!(res.htlc_minimum_msat, 1);
+ assert_eq!(res.common_fields.htlc_minimum_msat, 1);
}
#[test]
match events[0] {
Event::PaymentClaimable { ref purpose, .. } => {
match &purpose {
- PaymentPurpose::InvoicePayment { payment_preimage, .. } => {
+ PaymentPurpose::Bolt11InvoicePayment { payment_preimage, .. } => {
claim_payment(&nodes[0], &[&nodes[1]], payment_preimage.unwrap());
},
- _ => panic!("expected PaymentPurpose::InvoicePayment")
+ _ => panic!("expected PaymentPurpose::Bolt11InvoicePayment")
}
},
_ => panic!("Unexpected event"),
let height = HTLC_TIMEOUT_BROADCAST + 1;
connect_blocks(&nodes[0], height - nodes[0].best_block_info().1);
check_closed_broadcast(&nodes[0], 1, true);
- check_closed_event!(&nodes[0], 1, ClosureReason::HolderForceClosed, false,
+ check_closed_event!(&nodes[0], 1, ClosureReason::HTLCsTimedOut, false,
[nodes[1].node.get_our_node_id()], 100000);
watchtower_alice.chain_monitor.block_connected(&create_dummy_block(BlockHash::all_zeros(), 42, vec![bob_state_y.clone()]), height);
check_added_monitors(&nodes[0], 1);
check_added_monitors!(nodes[0], 0);
let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
- let channel_id = crate::chain::transaction::OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index }.to_channel_id();
+ let channel_id = ChannelId::v1_from_funding_outpoint(crate::chain::transaction::OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index });
nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id, data: "Hi".to_owned() });
assert!(nodes[0].chain_monitor.added_monitors.lock().unwrap().is_empty());
check_closed_event!(nodes[0], 2, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString("Hi".to_string()) }, true,
// Modify the `OpenChannel` from `nodes[2]` to `nodes[0]` to ensure that it uses the same
// `temporary_channel_id` as the `OpenChannel` from nodes[1] to nodes[0].
- open_chan_msg_chan_2_0.temporary_channel_id = open_chan_msg_chan_1_0.temporary_channel_id;
+ open_chan_msg_chan_2_0.common_fields.temporary_channel_id = open_chan_msg_chan_1_0.common_fields.temporary_channel_id;
// Assert that `nodes[0]` can accept both `OpenChannel` requests, even though they use the same
// `temporary_channel_id` as they are from different peers.
match &events[0] {
MessageSendEvent::SendAcceptChannel { node_id, msg } => {
assert_eq!(node_id, &nodes[1].node.get_our_node_id());
- assert_eq!(msg.temporary_channel_id, open_chan_msg_chan_1_0.temporary_channel_id);
+ assert_eq!(msg.common_fields.temporary_channel_id, open_chan_msg_chan_1_0.common_fields.temporary_channel_id);
},
_ => panic!("Unexpected event"),
}
match &events[0] {
MessageSendEvent::SendAcceptChannel { node_id, msg } => {
assert_eq!(node_id, &nodes[2].node.get_our_node_id());
- assert_eq!(msg.temporary_channel_id, open_chan_msg_chan_1_0.temporary_channel_id);
+ assert_eq!(msg.common_fields.temporary_channel_id, open_chan_msg_chan_1_0.common_fields.temporary_channel_id);
},
_ => panic!("Unexpected event"),
}
check_added_monitors!(nodes[1], 1);
expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
let reason = ClosureReason::ProcessingError { err: format!("An existing channel using outpoint {} is open with peer {}", funding_output, nodes[2].node.get_our_node_id()), };
- check_closed_events(&nodes[0], &[ExpectedCloseEvent::from_id_reason(funding_output.to_channel_id(), true, reason)]);
+ check_closed_events(&nodes[0], &[ExpectedCloseEvent::from_id_reason(ChannelId::v1_from_funding_outpoint(funding_output), true, reason)]);
let funding_signed = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed);
let (_, _, _, real_channel_id, funding_tx) = create_chan_between_nodes(&nodes[0], &nodes[1]);
let real_chan_funding_txo = chain::transaction::OutPoint { txid: funding_tx.txid(), index: 0 };
- assert_eq!(real_chan_funding_txo.to_channel_id(), real_channel_id);
+ assert_eq!(ChannelId::v1_from_funding_outpoint(real_chan_funding_txo), real_channel_id);
nodes[2].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None, None).unwrap();
let mut open_chan_msg = get_event_msg!(nodes[2], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
- let node_c_temp_chan_id = open_chan_msg.temporary_channel_id;
- open_chan_msg.temporary_channel_id = real_channel_id;
+ let node_c_temp_chan_id = open_chan_msg.common_fields.temporary_channel_id;
+ open_chan_msg.common_fields.temporary_channel_id = real_channel_id;
nodes[1].node.handle_open_channel(&nodes[2].node.get_our_node_id(), &open_chan_msg);
let mut accept_chan_msg = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[2].node.get_our_node_id());
- accept_chan_msg.temporary_channel_id = node_c_temp_chan_id;
+ accept_chan_msg.common_fields.temporary_channel_id = node_c_temp_chan_id;
nodes[2].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_chan_msg);
// Now that we have a second channel with the same funding txo, send a bogus funding message
// first (valid) and second (invalid) channels are closed, given they both have
// the same non-temporary channel_id. However, currently we do not, so we just
// move forward with it.
- assert_eq!(msg.channel_id, open_chan_msg.temporary_channel_id);
+ assert_eq!(msg.channel_id, open_chan_msg.common_fields.temporary_channel_id);
assert_eq!(node_id, nodes[0].node.get_our_node_id());
},
_ => panic!("Unexpected event"),
let funding_signed_msg = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
let funding_outpoint = crate::chain::transaction::OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index };
- let channel_id = funding_outpoint.to_channel_id();
+ let channel_id = ChannelId::v1_from_funding_outpoint(funding_outpoint);
// Now we have the first channel past funding_created (ie it has a txid-based channel_id, not a
// temporary one).
// First try to open a second channel with a temporary channel id equal to the txid-based one.
// Technically this is allowed by the spec, but we don't support it and there's little reason
// to. Still, it shouldn't cause any other issues.
- open_chan_msg.temporary_channel_id = channel_id;
+ open_chan_msg.common_fields.temporary_channel_id = channel_id;
nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_chan_msg);
{
let events = nodes[1].node.get_and_clear_pending_msg_events();
MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id } => {
// Technically, at this point, nodes[1] would be justified in thinking both
// channels are closed, but currently we do not, so we just move forward with it.
- assert_eq!(msg.channel_id, open_chan_msg.temporary_channel_id);
+ assert_eq!(msg.channel_id, open_chan_msg.common_fields.temporary_channel_id);
assert_eq!(node_id, nodes[0].node.get_our_node_id());
},
_ => panic!("Unexpected event"),
// another channel in the ChannelManager - an invalid state. Thus, we'd panic later when we
// try to create another channel. Instead, we drop the channel entirely here (leaving the
// channelmanager in a possibly nonsense state instead).
- match a_peer_state.channel_by_id.remove(&open_chan_2_msg.temporary_channel_id).unwrap() {
+ match a_peer_state.channel_by_id.remove(&open_chan_2_msg.common_fields.temporary_channel_id).unwrap() {
ChannelPhase::UnfundedOutboundV1(mut chan) => {
let logger = test_utils::TestLogger::new();
chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap()
nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 1_000_000, 500_000_000, 42, None, None).unwrap();
let mut open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
- open_channel.max_htlc_value_in_flight_msat = 50_000_000;
- open_channel.max_accepted_htlcs = 60;
+ open_channel.common_fields.max_htlc_value_in_flight_msat = 50_000_000;
+ open_channel.common_fields.max_accepted_htlcs = 60;
if on_holder_tx {
- open_channel.dust_limit_satoshis = 546;
+ open_channel.common_fields.dust_limit_satoshis = 546;
}
nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel);
let mut accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
(chan.context().get_dust_buffer_feerate(None) as u64,
chan.context().get_max_dust_htlc_exposure_msat(&LowerBoundedFeeEstimator(nodes[0].fee_estimator)))
};
- let dust_outbound_htlc_on_holder_tx_msat: u64 = (dust_buffer_feerate * htlc_timeout_tx_weight(&channel_type_features) / 1000 + open_channel.dust_limit_satoshis - 1) * 1000;
+ let dust_outbound_htlc_on_holder_tx_msat: u64 = (dust_buffer_feerate * htlc_timeout_tx_weight(&channel_type_features) / 1000 + open_channel.common_fields.dust_limit_satoshis - 1) * 1000;
let dust_outbound_htlc_on_holder_tx: u64 = max_dust_htlc_exposure_msat / dust_outbound_htlc_on_holder_tx_msat;
- let dust_inbound_htlc_on_holder_tx_msat: u64 = (dust_buffer_feerate * htlc_success_tx_weight(&channel_type_features) / 1000 + open_channel.dust_limit_satoshis - 1) * 1000;
+	// Subtract 3 sats for multiplier and 2 sats for fixed limit to make sure we are 50% below the dust limit.
+	// This is to make sure we fully use the dust limit. If we don't, we could end up with `dust_inbound_htlc_on_holder_tx` being 1
+	// while `max_dust_htlc_exposure_msat` is not equal to `dust_outbound_htlc_on_holder_tx_msat`.
+ let dust_inbound_htlc_on_holder_tx_msat: u64 = (dust_buffer_feerate * htlc_success_tx_weight(&channel_type_features) / 1000 + open_channel.common_fields.dust_limit_satoshis - if multiplier_dust_limit { 3 } else { 2 }) * 1000;
let dust_inbound_htlc_on_holder_tx: u64 = max_dust_htlc_exposure_msat / dust_inbound_htlc_on_holder_tx_msat;
let dust_htlc_on_counterparty_tx: u64 = 4;
let accept_channel_message = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel_message);
- let best_height = nodes[0].node.best_block.read().unwrap().height();
+ let best_height = nodes[0].node.best_block.read().unwrap().height;
let chan_id = *nodes[0].network_chan_count.borrow();
let events = nodes[0].node.get_and_clear_pending_events();
let accept_channel_message = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel_message);
- let best_height = nodes[0].node.best_block.read().unwrap().height();
+ let best_height = nodes[0].node.best_block.read().unwrap().height;
let chan_id = *nodes[0].network_chan_count.borrow();
let events = nodes[0].node.get_and_clear_pending_events();
check_closed_event(&nodes[1], 1, ClosureReason::HolderForceClosed, false, &[nodes[0].node.get_our_node_id()], 100000);
}
+#[test]
+fn test_channel_close_when_not_timely_accepted() {
+	// Tests that an outbound channel whose `accept_channel` never arrives is force-closed once
+	// `UNFUNDED_CHANNEL_AGE_LIMIT_TICKS` timer ticks elapse, and that the now-empty per-peer
+	// state entry is removed as well.
+	// Create network of two nodes
+	let chanmon_cfgs = create_chanmon_cfgs(2);
+	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+	// Simulate peer-disconnects mid-handshake
+	// The channel is initiated from the node 0 side,
+	// but the nodes disconnect before node 1 could send accept channel
+	let create_chan_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None, None).unwrap();
+	let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
+	assert_eq!(open_channel_msg.common_fields.temporary_channel_id, create_chan_id);
+
+	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
+
+	// Make sure that we have not removed the OutboundV1Channel from node[0] immediately.
+	assert_eq!(nodes[0].node.list_channels().len(), 1);
+
+	// Since channel was inbound from node[1] perspective, it should have been dropped immediately.
+	assert_eq!(nodes[1].node.list_channels().len(), 0);
+
+	// In the meantime, some time passes.
+	for _ in 0..UNFUNDED_CHANNEL_AGE_LIMIT_TICKS {
+		nodes[0].node.timer_tick_occurred();
+	}
+
+	// Since we disconnected from peer and did not connect back within time,
+	// we should have forced-closed the channel by now.
+	check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
+	assert_eq!(nodes[0].node.list_channels().len(), 0);
+
+	{
+		// Since accept channel message was never received
+		// The channel should be forced close by now from node 0 side
+		// and the peer removed from per_peer_state
+		let node_0_per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
+		assert_eq!(node_0_per_peer_state.len(), 0);
+	}
+}
+
+#[test]
+fn test_rebroadcast_open_channel_when_reconnect_mid_handshake() {
+	// Tests that an outbound channel still awaiting `accept_channel` survives a disconnect and
+	// that the same `open_channel` message is re-queued for sending once the peers reconnect.
+	// Create network of two nodes
+	let chanmon_cfgs = create_chanmon_cfgs(2);
+	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+	// Simulate peer-disconnects mid-handshake
+	// The channel is initiated from the node 0 side,
+	// but the nodes disconnect before node 1 could send accept channel
+	let create_chan_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None, None).unwrap();
+	let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
+	assert_eq!(open_channel_msg.common_fields.temporary_channel_id, create_chan_id);
+
+	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
+
+	// Make sure that we have not removed the OutboundV1Channel from node[0] immediately.
+	assert_eq!(nodes[0].node.list_channels().len(), 1);
+
+	// Since channel was inbound from node[1] perspective, it should have been immediately dropped.
+	assert_eq!(nodes[1].node.list_channels().len(), 0);
+
+	// The peers now reconnect
+	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
+		features: nodes[1].node.init_features(), networks: None, remote_network_address: None
+	}, true).unwrap();
+	nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
+		features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+	}, false).unwrap();
+
+	// Make sure the SendOpenChannel message is added to node_0 pending message events
+	let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
+	assert_eq!(msg_events.len(), 1);
+	match &msg_events[0] {
+		MessageSendEvent::SendOpenChannel { msg, .. } => assert_eq!(msg, &open_channel_msg),
+		_ => panic!("Unexpected message."),
+	}
+}
+
fn do_test_multi_post_event_actions(do_reload: bool) {
// Tests handling multiple post-Event actions at once.
// There is specific code in ChannelManager to handle channels where multiple post-Event
// Complete the persistence of the monitor.
nodes[0].chain_monitor.complete_sole_pending_chan_update(
- &OutPoint { txid: tx.txid(), index: 1 }.to_channel_id()
+ &ChannelId::v1_from_funding_outpoint(OutPoint { txid: tx.txid(), index: 1 })
);
let events = nodes[0].node.get_and_clear_pending_events();
}
#[test]
-fn test_disconnect_in_funding_batch() {
+fn test_close_in_funding_batch() {
+ // This test ensures that if one of the channels
+ // in the batch closes, the complete batch will close.
let chanmon_cfgs = create_chanmon_cfgs(3);
let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
// The transaction should not have been broadcast before all channels are ready.
assert_eq!(nodes[0].tx_broadcaster.txn_broadcast().len(), 0);
- // The remaining peer in the batch disconnects.
- nodes[0].node.peer_disconnected(&nodes[2].node.get_our_node_id());
-
- // The channels in the batch will close immediately.
+ // Force-close the channel for which we've completed the initial monitor.
let funding_txo_1 = OutPoint { txid: tx.txid(), index: 0 };
let funding_txo_2 = OutPoint { txid: tx.txid(), index: 1 };
- let channel_id_1 = funding_txo_1.to_channel_id();
- let channel_id_2 = funding_txo_2.to_channel_id();
+ let channel_id_1 = ChannelId::v1_from_funding_outpoint(funding_txo_1);
+ let channel_id_2 = ChannelId::v1_from_funding_outpoint(funding_txo_2);
+
+ nodes[0].node.force_close_broadcasting_latest_txn(&channel_id_1, &nodes[1].node.get_our_node_id()).unwrap();
+
+ // The monitor should become closed.
+ check_added_monitors(&nodes[0], 1);
+ {
+ let mut monitor_updates = nodes[0].chain_monitor.monitor_updates.lock().unwrap();
+ let monitor_updates_1 = monitor_updates.get(&channel_id_1).unwrap();
+ assert_eq!(monitor_updates_1.len(), 1);
+ assert_eq!(monitor_updates_1[0].update_id, CLOSED_CHANNEL_UPDATE_ID);
+ }
+
+ let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
+ match msg_events[0] {
+ MessageSendEvent::HandleError { .. } => (),
+ _ => panic!("Unexpected message."),
+ }
+
+ // We broadcast the commitment transaction as part of the force-close.
+ {
+ let broadcasted_txs = nodes[0].tx_broadcaster.txn_broadcast();
+ assert_eq!(broadcasted_txs.len(), 1);
+ assert!(broadcasted_txs[0].txid() != tx.txid());
+ assert_eq!(broadcasted_txs[0].input.len(), 1);
+ assert_eq!(broadcasted_txs[0].input[0].previous_output.txid, tx.txid());
+ }
+
+ // All channels in the batch should close immediately.
check_closed_events(&nodes[0], &[
ExpectedCloseEvent {
channel_id: Some(channel_id_1),
},
]);
- // The monitor should become closed.
- check_added_monitors(&nodes[0], 1);
- {
- let mut monitor_updates = nodes[0].chain_monitor.monitor_updates.lock().unwrap();
- let monitor_updates_1 = monitor_updates.get(&channel_id_1).unwrap();
- assert_eq!(monitor_updates_1.len(), 1);
- assert_eq!(monitor_updates_1[0].update_id, CLOSED_CHANNEL_UPDATE_ID);
- }
-
- // The funding transaction should not have been broadcast, and therefore, we don't need
- // to broadcast a force-close transaction for the closed monitor.
- assert_eq!(nodes[0].tx_broadcaster.txn_broadcast().len(), 0);
-
// Ensure the channels don't exist anymore.
assert!(nodes[0].node.list_channels().is_empty());
}
// Force-close the channel for which we've completed the initial monitor.
let funding_txo_1 = OutPoint { txid: tx.txid(), index: 0 };
let funding_txo_2 = OutPoint { txid: tx.txid(), index: 1 };
- let channel_id_1 = funding_txo_1.to_channel_id();
- let channel_id_2 = funding_txo_2.to_channel_id();
+ let channel_id_1 = ChannelId::v1_from_funding_outpoint(funding_txo_1);
+ let channel_id_2 = ChannelId::v1_from_funding_outpoint(funding_txo_2);
nodes[0].node.force_close_broadcasting_latest_txn(&channel_id_1, &nodes[1].node.get_our_node_id()).unwrap();
check_added_monitors(&nodes[0], 2);
{
let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
let funding_tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 1_000_000, 0);
- let chan_id = chain::transaction::OutPoint { txid: funding_tx.txid(), index: 0 }.to_channel_id();
+ let chan_id = ChannelId::v1_from_funding_outpoint(chain::transaction::OutPoint { txid: funding_tx.txid(), index: 0 });
assert_eq!(nodes[0].node.list_channels().len(), 1);
assert_eq!(nodes[1].node.list_channels().len(), 1);
do_test_funding_and_commitment_tx_confirm_same_block(false);
do_test_funding_and_commitment_tx_confirm_same_block(true);
}
+
+#[test]
+fn test_accept_inbound_channel_errors_queued() {
+	// For manually accepted inbound channels, tests that a close error is correctly handled
+	// and the channel fails for the initiator.
+	// NOTE(review): the handshake configs below appear designed so the initiator's
+	// `our_to_self_delay` (2000) exceeds the acceptor's `their_to_self_delay` limit (1000),
+	// making `accept_inbound_channel` fail — confirm against the handshake-limit checks.
+	let mut config0 = test_default_channel_config();
+	let mut config1 = config0.clone();
+	config1.channel_handshake_limits.their_to_self_delay = 1000;
+	config1.manually_accept_inbound_channels = true;
+	config0.channel_handshake_config.our_to_self_delay = 2000;
+
+	let chanmon_cfgs = create_chanmon_cfgs(2);
+	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config0), Some(config1)]);
+	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+	nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None, None).unwrap();
+	let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
+
+	nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg);
+	let events = nodes[1].node.get_and_clear_pending_events();
+	match events[0] {
+		Event::OpenChannelRequest { temporary_channel_id, .. } => {
+			match nodes[1].node.accept_inbound_channel(&temporary_channel_id, &nodes[0].node.get_our_node_id(), 23) {
+				Err(APIError::ChannelUnavailable { err: _ }) => (),
+				_ => panic!(),
+			}
+		}
+		_ => panic!("Unexpected event"),
+	}
+	// The rejection must be queued as an error message back to the initiator, carrying the
+	// temporary channel id of the failed channel.
+	assert_eq!(get_err_msg(&nodes[1], &nodes[0].node.get_our_node_id()).channel_id,
+		open_channel_msg.common_fields.temporary_channel_id);
+}
//! Utilities to generate inbound payment information in service of invoice creation.
-use alloc::string::ToString;
use bitcoin::hashes::{Hash, HashEngine};
use bitcoin::hashes::cmp::fixed_time_eq;
use bitcoin::hashes::hmac::{Hmac, HmacEngine};
use crate::util::errors::APIError;
use crate::util::logger::Logger;
-use core::convert::{TryFrom, TryInto};
+#[allow(unused_imports)]
+use crate::prelude::*;
+
use core::ops::Deref;
pub(crate) const IV_LEN: usize = 16;
--- /dev/null
+// This file is Copyright its original authors, visible in version control
+// history.
+//
+// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
+// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
+// You may not use this file except in accordance with one or both of these
+// licenses.
+
+use crate::io_extras::sink;
+use crate::prelude::*;
+use core::ops::Deref;
+
+use bitcoin::blockdata::constants::WITNESS_SCALE_FACTOR;
+use bitcoin::consensus::Encodable;
+use bitcoin::policy::MAX_STANDARD_TX_WEIGHT;
+use bitcoin::{
+ absolute::LockTime as AbsoluteLockTime, OutPoint, ScriptBuf, Sequence, Transaction, TxIn,
+ TxOut, Weight,
+};
+
+use crate::chain::chaininterface::fee_for_weight;
+use crate::events::bump_transaction::{BASE_INPUT_WEIGHT, EMPTY_SCRIPT_SIG_WEIGHT};
+use crate::ln::channel::TOTAL_BITCOIN_SUPPLY_SATOSHIS;
+use crate::ln::msgs::SerialId;
+use crate::ln::{msgs, ChannelId};
+use crate::sign::{EntropySource, P2TR_KEY_PATH_WITNESS_WEIGHT, P2WPKH_WITNESS_WEIGHT};
+use crate::util::ser::TransactionU16LenLimited;
+
+/// The number of received `tx_add_input` messages during a negotiation at which point the
+/// negotiation MUST be failed.
+const MAX_RECEIVED_TX_ADD_INPUT_COUNT: u16 = 4096;
+
+/// The number of received `tx_add_output` messages during a negotiation at which point the
+/// negotiation MUST be failed.
+const MAX_RECEIVED_TX_ADD_OUTPUT_COUNT: u16 = 4096;
+
+/// The number of inputs or outputs that the state machine can have, before it MUST fail the
+/// negotiation.
+/// (252 is presumably chosen as the largest count encodable as a single-byte CompactSize —
+/// confirm against the dual-funding spec.)
+const MAX_INPUTS_OUTPUTS_COUNT: usize = 252;
+
+/// The total weight of the common fields whose fee is paid by the initiator of the interactive
+/// transaction construction protocol.
+// Non-witness bytes weigh WITNESS_SCALE_FACTOR (4) WU each; the segwit marker and flag bytes
+// weigh 1 WU each, hence the trailing `+ 2`.
+const TX_COMMON_FIELDS_WEIGHT: u64 = (4 /* version */ + 4 /* locktime */ + 1 /* input count */ +
+	1 /* output count */) * WITNESS_SCALE_FACTOR as u64 + 2 /* segwit marker + flag */;
+
+// BOLT 3 - Lower bounds for input weights
+
+/// Lower bound for P2WPKH input weight
+pub(crate) const P2WPKH_INPUT_WEIGHT_LOWER_BOUND: u64 =
+	BASE_INPUT_WEIGHT + EMPTY_SCRIPT_SIG_WEIGHT + P2WPKH_WITNESS_WEIGHT;
+
+/// Lower bound for P2WSH input weight is chosen as same as P2WPKH input weight in BOLT 3
+pub(crate) const P2WSH_INPUT_WEIGHT_LOWER_BOUND: u64 = P2WPKH_INPUT_WEIGHT_LOWER_BOUND;
+
+/// Lower bound for P2TR input weight is chosen as the key spend path.
+/// Not specified in BOLT 3, but a reasonable lower bound.
+pub(crate) const P2TR_INPUT_WEIGHT_LOWER_BOUND: u64 =
+	BASE_INPUT_WEIGHT + EMPTY_SCRIPT_SIG_WEIGHT + P2TR_KEY_PATH_WITNESS_WEIGHT;
+
+/// Lower bound for unknown segwit version input weight is chosen the same as P2WPKH in BOLT 3
+pub(crate) const UNKNOWN_SEGWIT_VERSION_INPUT_WEIGHT_LOWER_BOUND: u64 =
+	P2WPKH_INPUT_WEIGHT_LOWER_BOUND;
+
+/// Helpers for determining which side of the negotiation a `SerialId` belongs to.
+trait SerialIdExt {
+	fn is_for_initiator(&self) -> bool;
+	fn is_for_non_initiator(&self) -> bool;
+}
+
+impl SerialIdExt for SerialId {
+	// The initiator uses even serial ids, the non-initiator odd ones.
+	fn is_for_initiator(&self) -> bool {
+		self % 2 == 0
+	}
+
+	fn is_for_non_initiator(&self) -> bool {
+		!self.is_for_initiator()
+	}
+}
+
+/// The reason an interactive transaction construction negotiation was aborted, mapping the
+/// failure conditions of the interactive-tx protocol onto distinct variants.
+#[derive(Debug, Clone, PartialEq)]
+pub(crate) enum AbortReason {
+	InvalidStateTransition,
+	UnexpectedCounterpartyMessage,
+	ReceivedTooManyTxAddInputs,
+	ReceivedTooManyTxAddOutputs,
+	IncorrectInputSequenceValue,
+	IncorrectSerialIdParity,
+	SerialIdUnknown,
+	DuplicateSerialId,
+	PrevTxOutInvalid,
+	ExceededMaximumSatsAllowed,
+	ExceededNumberOfInputsOrOutputs,
+	TransactionTooLarge,
+	BelowDustLimit,
+	InvalidOutputScript,
+	InsufficientFees,
+	OutputsValueExceedsInputsValue,
+	InvalidTx,
+}
+
+/// An input added to the shared transaction during negotiation, together with the serial id it
+/// was announced under and the previous output it spends.
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub(crate) struct InteractiveTxInput {
+	serial_id: SerialId,
+	input: TxIn,
+	prev_output: TxOut,
+}
+
+/// An output added to the shared transaction during negotiation, together with the serial id it
+/// was announced under.
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub(crate) struct InteractiveTxOutput {
+	serial_id: SerialId,
+	tx_out: TxOut,
+}
+
+/// The immutable result of a successful negotiation: all agreed inputs/outputs plus the
+/// per-side value totals needed for fee accounting.
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub(crate) struct ConstructedTransaction {
+	holder_is_initiator: bool,
+
+	inputs: Vec<InteractiveTxInput>,
+	outputs: Vec<InteractiveTxOutput>,
+
+	// Sums of the values (in satoshis) contributed by each side, split by serial-id parity.
+	local_inputs_value_satoshis: u64,
+	local_outputs_value_satoshis: u64,
+
+	remote_inputs_value_satoshis: u64,
+	remote_outputs_value_satoshis: u64,
+
+	lock_time: AbsoluteLockTime,
+}
+
+impl ConstructedTransaction {
+	/// Consumes a completed `NegotiationContext`, splitting its inputs/outputs into local and
+	/// remote value totals by serial-id parity. Value sums saturate rather than overflow.
+	fn new(context: NegotiationContext) -> Self {
+		let local_inputs_value_satoshis = context
+			.inputs
+			.iter()
+			.filter(|(serial_id, _)| {
+				!is_serial_id_valid_for_counterparty(context.holder_is_initiator, serial_id)
+			})
+			.fold(0u64, |value, (_, input)| value.saturating_add(input.prev_output.value));
+
+		let local_outputs_value_satoshis = context
+			.outputs
+			.iter()
+			.filter(|(serial_id, _)| {
+				!is_serial_id_valid_for_counterparty(context.holder_is_initiator, serial_id)
+			})
+			.fold(0u64, |value, (_, output)| value.saturating_add(output.tx_out.value));
+
+		Self {
+			holder_is_initiator: context.holder_is_initiator,
+
+			local_inputs_value_satoshis,
+			local_outputs_value_satoshis,
+
+			remote_inputs_value_satoshis: context.remote_inputs_value(),
+			remote_outputs_value_satoshis: context.remote_outputs_value(),
+
+			inputs: context.inputs.into_values().collect(),
+			outputs: context.outputs.into_values().collect(),
+
+			lock_time: context.tx_locktime,
+		}
+	}
+
+	/// Estimated total weight of the final transaction: common fields plus per-input and
+	/// per-output lower-bound estimates. Saturates to `Weight::MAX` on overflow.
+	pub fn weight(&self) -> Weight {
+		let inputs_weight = self.inputs.iter().fold(
+			Weight::from_wu(0),
+			|weight, InteractiveTxInput { prev_output, .. }| {
+				weight.checked_add(estimate_input_weight(prev_output)).unwrap_or(Weight::MAX)
+			},
+		);
+		let outputs_weight = self.outputs.iter().fold(
+			Weight::from_wu(0),
+			|weight, InteractiveTxOutput { tx_out, .. }| {
+				weight.checked_add(get_output_weight(&tx_out.script_pubkey)).unwrap_or(Weight::MAX)
+			},
+		);
+		Weight::from_wu(TX_COMMON_FIELDS_WEIGHT)
+			.checked_add(inputs_weight)
+			.and_then(|weight| weight.checked_add(outputs_weight))
+			.unwrap_or(Weight::MAX)
+	}
+
+	/// Builds the final (unsigned, version-2) transaction with inputs and outputs ordered by
+	/// serial id, as both peers must agree on the same ordering.
+	pub fn into_unsigned_tx(self) -> Transaction {
+		// Inputs and outputs must be sorted by serial_id
+		let ConstructedTransaction { mut inputs, mut outputs, .. } = self;
+
+		inputs.sort_unstable_by_key(|InteractiveTxInput { serial_id, .. }| *serial_id);
+		outputs.sort_unstable_by_key(|InteractiveTxOutput { serial_id, .. }| *serial_id);
+
+		let input: Vec<TxIn> =
+			inputs.into_iter().map(|InteractiveTxInput { input, .. }| input).collect();
+		let output: Vec<TxOut> =
+			outputs.into_iter().map(|InteractiveTxOutput { tx_out, .. }| tx_out).collect();
+
+		Transaction { version: 2, lock_time: self.lock_time, input, output }
+	}
+}
+
+/// Mutable state accumulated over the course of a single interactive transaction construction
+/// negotiation.
+#[derive(Debug)]
+struct NegotiationContext {
+	holder_is_initiator: bool,
+	// Counters enforcing the MAX_RECEIVED_TX_ADD_{INPUT,OUTPUT}_COUNT message limits.
+	received_tx_add_input_count: u16,
+	received_tx_add_output_count: u16,
+	inputs: HashMap<SerialId, InteractiveTxInput>,
+	// Outpoints spent by inputs added so far, used to reject duplicate spends.
+	prevtx_outpoints: HashSet<OutPoint>,
+	outputs: HashMap<SerialId, InteractiveTxOutput>,
+	tx_locktime: AbsoluteLockTime,
+	feerate_sat_per_kw: u32,
+}
+
+/// Returns a lower-bound weight estimate for spending `prev_output`, chosen by the script type
+/// of the output being spent (unknown segwit versions fall back to the P2WPKH bound).
+pub(crate) fn estimate_input_weight(prev_output: &TxOut) -> Weight {
+	Weight::from_wu(if prev_output.script_pubkey.is_v0_p2wpkh() {
+		P2WPKH_INPUT_WEIGHT_LOWER_BOUND
+	} else if prev_output.script_pubkey.is_v0_p2wsh() {
+		P2WSH_INPUT_WEIGHT_LOWER_BOUND
+	} else if prev_output.script_pubkey.is_v1_p2tr() {
+		P2TR_INPUT_WEIGHT_LOWER_BOUND
+	} else {
+		UNKNOWN_SEGWIT_VERSION_INPUT_WEIGHT_LOWER_BOUND
+	})
+}
+
+/// Weight of a transaction output: 8 value bytes plus the consensus-encoded script (which
+/// includes its CompactSize length prefix), all non-witness and thus scaled by
+/// `WITNESS_SCALE_FACTOR`.
+pub(crate) fn get_output_weight(script_pubkey: &ScriptBuf) -> Weight {
+	Weight::from_wu(
+		(8 /* value */ + script_pubkey.consensus_encode(&mut sink()).unwrap() as u64)
+			* WITNESS_SCALE_FACTOR as u64,
+	)
+}
+
+/// Whether a `serial_id` received from the counterparty has the parity expected for their role
+/// (initiator: even, non-initiator: odd).
+fn is_serial_id_valid_for_counterparty(holder_is_initiator: bool, serial_id: &SerialId) -> bool {
+	// A received `SerialId`'s parity must match the role of the counterparty.
+	holder_is_initiator == serial_id.is_for_non_initiator()
+}
+
+impl NegotiationContext {
+	// Convenience wrapper over the free function of the same name.
+	fn is_serial_id_valid_for_counterparty(&self, serial_id: &SerialId) -> bool {
+		is_serial_id_valid_for_counterparty(self.holder_is_initiator, serial_id)
+	}
+
+	/// Total value (sats) of all inputs added by the counterparty; saturating sum.
+	fn remote_inputs_value(&self) -> u64 {
+		self.inputs
+			.iter()
+			.filter(|(serial_id, _)| self.is_serial_id_valid_for_counterparty(serial_id))
+			.fold(0u64, |acc, (_, InteractiveTxInput { prev_output, .. })| {
+				acc.saturating_add(prev_output.value)
+			})
+	}
+
+	/// Total value (sats) of all outputs added by the counterparty; saturating sum.
+	fn remote_outputs_value(&self) -> u64 {
+		self.outputs
+			.iter()
+			.filter(|(serial_id, _)| self.is_serial_id_valid_for_counterparty(serial_id))
+			.fold(0u64, |acc, (_, InteractiveTxOutput { tx_out, .. })| {
+				acc.saturating_add(tx_out.value)
+			})
+	}
+
+	/// Estimated weight of all inputs added by the counterparty; saturating sum of the
+	/// per-script-type lower bounds.
+	fn remote_inputs_weight(&self) -> Weight {
+		Weight::from_wu(
+			self.inputs
+				.iter()
+				.filter(|(serial_id, _)| self.is_serial_id_valid_for_counterparty(serial_id))
+				.fold(0u64, |weight, (_, InteractiveTxInput { prev_output, .. })| {
+					weight.saturating_add(estimate_input_weight(prev_output).to_wu())
+				}),
+		)
+	}
+
+	/// Weight of all outputs added by the counterparty; saturating sum.
+	fn remote_outputs_weight(&self) -> Weight {
+		Weight::from_wu(
+			self.outputs
+				.iter()
+				.filter(|(serial_id, _)| self.is_serial_id_valid_for_counterparty(serial_id))
+				.fold(0u64, |weight, (_, InteractiveTxOutput { tx_out, .. })| {
+					weight.saturating_add(get_output_weight(&tx_out.script_pubkey).to_wu())
+				}),
+		)
+	}
+
+	/// Handles a counterparty `tx_add_input`, enforcing the protocol's validity rules before
+	/// recording the input under its serial id.
+	fn received_tx_add_input(&mut self, msg: &msgs::TxAddInput) -> Result<(), AbortReason> {
+		// The interactive-txs spec calls for us to fail negotiation if the `prevtx` we receive is
+		// invalid. However, we would not need to account for this explicit negotiation failure
+		// mode here since `PeerManager` would already disconnect the peer if the `prevtx` is
+		// invalid; implicitly ending the negotiation.
+
+		if !self.is_serial_id_valid_for_counterparty(&msg.serial_id) {
+			// The receiving node:
+			// - MUST fail the negotiation if:
+			//   - the `serial_id` has the wrong parity
+			return Err(AbortReason::IncorrectSerialIdParity);
+		}
+
+		self.received_tx_add_input_count += 1;
+		if self.received_tx_add_input_count > MAX_RECEIVED_TX_ADD_INPUT_COUNT {
+			// The receiving node:
+			// - MUST fail the negotiation if:
+			//   - it has received 4096 `tx_add_input` messages during this negotiation
+			return Err(AbortReason::ReceivedTooManyTxAddInputs);
+		}
+
+		if msg.sequence >= 0xFFFFFFFE {
+			// The receiving node:
+			// - MUST fail the negotiation if:
+			//   - `sequence` is set to `0xFFFFFFFE` or `0xFFFFFFFF`
+			return Err(AbortReason::IncorrectInputSequenceValue);
+		}
+
+		let transaction = msg.prevtx.as_transaction();
+		let txid = transaction.txid();
+
+		if let Some(tx_out) = transaction.output.get(msg.prevtx_out as usize) {
+			if !tx_out.script_pubkey.is_witness_program() {
+				// The receiving node:
+				// - MUST fail the negotiation if:
+				//   - the `scriptPubKey` is not a witness program
+				return Err(AbortReason::PrevTxOutInvalid);
+			}
+
+			if !self.prevtx_outpoints.insert(OutPoint { txid, vout: msg.prevtx_out }) {
+				// The receiving node:
+				// - MUST fail the negotiation if:
+				//   - the `prevtx` and `prevtx_vout` are identical to a previously added
+				//     (and not removed) input's
+				return Err(AbortReason::PrevTxOutInvalid);
+			}
+		} else {
+			// The receiving node:
+			// - MUST fail the negotiation if:
+			//   - `prevtx_vout` is greater or equal to the number of outputs on `prevtx`
+			return Err(AbortReason::PrevTxOutInvalid);
+		}
+
+		// NOTE(review): this second lookup can never fail — the `else` branch above already
+		// returned when `prevtx_out` was out of range.
+		let prev_out = if let Some(prev_out) = transaction.output.get(msg.prevtx_out as usize) {
+			prev_out.clone()
+		} else {
+			return Err(AbortReason::PrevTxOutInvalid);
+		};
+		match self.inputs.entry(msg.serial_id) {
+			hash_map::Entry::Occupied(_) => {
+				// The receiving node:
+				// - MUST fail the negotiation if:
+				//   - the `serial_id` is already included in the transaction
+				Err(AbortReason::DuplicateSerialId)
+			},
+			hash_map::Entry::Vacant(entry) => {
+				let prev_outpoint = OutPoint { txid, vout: msg.prevtx_out };
+				entry.insert(InteractiveTxInput {
+					serial_id: msg.serial_id,
+					input: TxIn {
+						previous_output: prev_outpoint,
+						sequence: Sequence(msg.sequence),
+						..Default::default()
+					},
+					prev_output: prev_out,
+				});
+				self.prevtx_outpoints.insert(prev_outpoint);
+				Ok(())
+			},
+		}
+	}
+
+	/// Handles a counterparty `tx_remove_input`, removing the input identified by the serial id.
+	fn received_tx_remove_input(&mut self, msg: &msgs::TxRemoveInput) -> Result<(), AbortReason> {
+		if !self.is_serial_id_valid_for_counterparty(&msg.serial_id) {
+			return Err(AbortReason::IncorrectSerialIdParity);
+		}
+
+		self.inputs
+			.remove(&msg.serial_id)
+			// The receiving node:
+			// - MUST fail the negotiation if:
+			//   - the input or output identified by the `serial_id` was not added by the sender
+			//   - the `serial_id` does not correspond to a currently added input
+			.ok_or(AbortReason::SerialIdUnknown)
+			.map(|_| ())
+	}
+
+	/// Handles a counterparty `tx_add_output`, enforcing the protocol's validity rules before
+	/// recording the output under its serial id.
+	fn received_tx_add_output(&mut self, msg: &msgs::TxAddOutput) -> Result<(), AbortReason> {
+		// The receiving node:
+		// - MUST fail the negotiation if:
+		//   - the serial_id has the wrong parity
+		if !self.is_serial_id_valid_for_counterparty(&msg.serial_id) {
+			return Err(AbortReason::IncorrectSerialIdParity);
+		}
+
+		self.received_tx_add_output_count += 1;
+		if self.received_tx_add_output_count > MAX_RECEIVED_TX_ADD_OUTPUT_COUNT {
+			// The receiving node:
+			// - MUST fail the negotiation if:
+			//   - it has received 4096 `tx_add_output` messages during this negotiation
+			return Err(AbortReason::ReceivedTooManyTxAddOutputs);
+		}
+
+		if msg.sats < msg.script.dust_value().to_sat() {
+			// The receiving node:
+			// - MUST fail the negotiation if:
+			//   - the sats amount is less than the dust_limit
+			return Err(AbortReason::BelowDustLimit);
+		}
+
+		// Check that adding this output would not cause the total output value to exceed the total
+		// bitcoin supply.
+		let mut outputs_value: u64 = 0;
+		for output in self.outputs.iter() {
+			outputs_value = outputs_value.saturating_add(output.1.tx_out.value);
+		}
+		if outputs_value.saturating_add(msg.sats) > TOTAL_BITCOIN_SUPPLY_SATOSHIS {
+			// The receiving node:
+			// - MUST fail the negotiation if:
+			//   - the sats amount is greater than 2,100,000,000,000,000 (TOTAL_BITCOIN_SUPPLY_SATOSHIS)
+			return Err(AbortReason::ExceededMaximumSatsAllowed);
+		}
+
+		// The receiving node:
+		//   - MUST accept P2WSH, P2WPKH, P2TR scripts
+		//   - MAY fail the negotiation if script is non-standard
+		//
+		// We can actually be a bit looser than the above as only witness version 0 has special
+		// length-based standardness constraints to match similar consensus rules. All witness scripts
+		// with witness versions V1 and up are always considered standard. Yes, the scripts can be
+		// anyone-can-spend-able, but if our counterparty wants to add an output like that then it's none
+		// of our concern really ¯\_(ツ)_/¯
+		//
+		// TODO: The last check would be simplified when https://github.com/rust-bitcoin/rust-bitcoin/commit/1656e1a09a1959230e20af90d20789a4a8f0a31b
+		// hits the next release of rust-bitcoin.
+		if !(msg.script.is_v0_p2wpkh()
+			|| msg.script.is_v0_p2wsh()
+			|| (msg.script.is_witness_program()
+				&& msg.script.witness_version().map(|v| v.to_num() >= 1).unwrap_or(false)))
+		{
+			return Err(AbortReason::InvalidOutputScript);
+		}
+
+		match self.outputs.entry(msg.serial_id) {
+			hash_map::Entry::Occupied(_) => {
+				// The receiving node:
+				// - MUST fail the negotiation if:
+				//   - the `serial_id` is already included in the transaction
+				Err(AbortReason::DuplicateSerialId)
+			},
+			hash_map::Entry::Vacant(entry) => {
+				entry.insert(InteractiveTxOutput {
+					serial_id: msg.serial_id,
+					tx_out: TxOut { value: msg.sats, script_pubkey: msg.script.clone() },
+				});
+				Ok(())
+			},
+		}
+	}
+
+	/// Handles a counterparty `tx_remove_output`, removing the output identified by the serial id.
+	fn received_tx_remove_output(&mut self, msg: &msgs::TxRemoveOutput) -> Result<(), AbortReason> {
+		if !self.is_serial_id_valid_for_counterparty(&msg.serial_id) {
+			return Err(AbortReason::IncorrectSerialIdParity);
+		}
+		if self.outputs.remove(&msg.serial_id).is_some() {
+			Ok(())
+		} else {
+			// The receiving node:
+			// - MUST fail the negotiation if:
+			//   - the input or output identified by the `serial_id` was not added by the sender
+			//   - the `serial_id` does not correspond to a currently added input
+			Err(AbortReason::SerialIdUnknown)
+		}
+	}
+
+	/// Records an input we announced via `tx_add_input`. Unlike the `received_*` path, our own
+	/// messages are assumed protocol-valid; only duplicate-outpoint use is rejected here.
+	fn sent_tx_add_input(&mut self, msg: &msgs::TxAddInput) -> Result<(), AbortReason> {
+		let tx = msg.prevtx.as_transaction();
+		let input = TxIn {
+			previous_output: OutPoint { txid: tx.txid(), vout: msg.prevtx_out },
+			sequence: Sequence(msg.sequence),
+			..Default::default()
+		};
+		let prev_output =
+			tx.output.get(msg.prevtx_out as usize).ok_or(AbortReason::PrevTxOutInvalid)?.clone();
+		if !self.prevtx_outpoints.insert(input.previous_output) {
+			// We have added an input that already exists
+			return Err(AbortReason::PrevTxOutInvalid);
+		}
+		self.inputs.insert(
+			msg.serial_id,
+			InteractiveTxInput { serial_id: msg.serial_id, input, prev_output },
+		);
+		Ok(())
+	}
+
+	/// Records an output we announced via `tx_add_output`.
+	fn sent_tx_add_output(&mut self, msg: &msgs::TxAddOutput) -> Result<(), AbortReason> {
+		self.outputs.insert(
+			msg.serial_id,
+			InteractiveTxOutput {
+				serial_id: msg.serial_id,
+				tx_out: TxOut { value: msg.sats, script_pubkey: msg.script.clone() },
+			},
+		);
+		Ok(())
+	}
+
+	/// Drops an input we previously announced; removing an unknown serial id is a no-op.
+	fn sent_tx_remove_input(&mut self, msg: &msgs::TxRemoveInput) -> Result<(), AbortReason> {
+		self.inputs.remove(&msg.serial_id);
+		Ok(())
+	}
+
+	/// Drops an output we previously announced; removing an unknown serial id is a no-op.
+	fn sent_tx_remove_output(&mut self, msg: &msgs::TxRemoveOutput) -> Result<(), AbortReason> {
+		self.outputs.remove(&msg.serial_id);
+		Ok(())
+	}
+
+	/// Verifies the counterparty paid at least the agreed feerate on the weight they contributed.
+	/// When they are the initiator, they must additionally cover the common transaction fields.
+	fn check_counterparty_fees(
+		&self, counterparty_fees_contributed: u64,
+	) -> Result<(), AbortReason> {
+		let counterparty_weight_contributed = self
+			.remote_inputs_weight()
+			.to_wu()
+			.saturating_add(self.remote_outputs_weight().to_wu());
+		let mut required_counterparty_contribution_fee =
+			fee_for_weight(self.feerate_sat_per_kw, counterparty_weight_contributed);
+		if !self.holder_is_initiator {
+			// if is the non-initiator:
+			// 	- the initiator's fees do not cover the common fields (version, segwit marker + flag,
+			// 		input count, output count, locktime)
+			let tx_common_fields_fee =
+				fee_for_weight(self.feerate_sat_per_kw, TX_COMMON_FIELDS_WEIGHT);
+			required_counterparty_contribution_fee += tx_common_fields_fee;
+		}
+		if counterparty_fees_contributed < required_counterparty_contribution_fee {
+			return Err(AbortReason::InsufficientFees);
+		}
+		Ok(())
+	}
+
+	/// Final validation of the negotiated transaction; consumes the context and, on success,
+	/// yields the agreed `ConstructedTransaction`.
+	fn validate_tx(self) -> Result<ConstructedTransaction, AbortReason> {
+		// The receiving node:
+		// MUST fail the negotiation if:
+
+		// - the peer's total input satoshis is less than their outputs
+		let remote_inputs_value = self.remote_inputs_value();
+		let remote_outputs_value = self.remote_outputs_value();
+		if remote_inputs_value < remote_outputs_value {
+			return Err(AbortReason::OutputsValueExceedsInputsValue);
+		}
+
+		// - there are more than 252 inputs
+		// - there are more than 252 outputs
+		if self.inputs.len() > MAX_INPUTS_OUTPUTS_COUNT
+			|| self.outputs.len() > MAX_INPUTS_OUTPUTS_COUNT
+		{
+			return Err(AbortReason::ExceededNumberOfInputsOrOutputs);
+		}
+
+		// - the peer's paid feerate does not meet or exceed the agreed feerate (based on the minimum fee).
+		self.check_counterparty_fees(remote_inputs_value.saturating_sub(remote_outputs_value))?;
+
+		let constructed_tx = ConstructedTransaction::new(self);
+
+		// Reject transactions exceeding the standardness weight limit, as they would not relay.
+		if constructed_tx.weight().to_wu() > MAX_STANDARD_TX_WEIGHT as u64 {
+			return Err(AbortReason::TransactionTooLarge);
+		}
+
+		Ok(constructed_tx)
+	}
+}
+
+// The interactive transaction construction protocol allows two peers to collaboratively build a
+// transaction for broadcast.
+//
+// The protocol is turn-based, so we define different states here that we store depending on whose
+// turn it is to send the next message. The states are defined so that their types ensure we only
+// perform actions (only send messages) via defined state transitions that do not violate the
+// protocol.
+//
+// An example of a full negotiation and associated states follows:
+//
+// +------------+ +------------------+---- Holder state after message sent/received ----+
+// | |--(1)- tx_add_input ---->| | SentChangeMsg +
+// | |<-(2)- tx_complete ------| | ReceivedTxComplete +
+// | |--(3)- tx_add_output --->| | SentChangeMsg +
+// | |<-(4)- tx_complete ------| | ReceivedTxComplete +
+// | |--(5)- tx_add_input ---->| | SentChangeMsg +
+// | Holder |<-(6)- tx_add_input -----| Counterparty | ReceivedChangeMsg +
+// | |--(7)- tx_remove_output >| | SentChangeMsg +
+// | |<-(8)- tx_add_output ----| | ReceivedChangeMsg +
+// | |--(9)- tx_complete ----->| | SentTxComplete +
+// | |<-(10) tx_complete ------| | NegotiationComplete +
+// +------------+ +------------------+--------------------------------------------------+
+
+/// Negotiation states that can send & receive `tx_(add|remove)_(input|output)` and `tx_complete`
+trait State {}
+
+/// Category of states where we have sent some message to the counterparty, and we are waiting for
+/// a response.
+trait SentMsgState: State {
+	/// Consumes the state, yielding the negotiation context it wraps.
+	fn into_negotiation_context(self) -> NegotiationContext;
+}
+
+/// Category of states that our counterparty has put us in after we receive a message from them.
+trait ReceivedMsgState: State {
+	/// Consumes the state, yielding the negotiation context it wraps.
+	fn into_negotiation_context(self) -> NegotiationContext;
+}
+
+// This macro is a helper for implementing the above state traits for various states subsequently
+// defined below the macro.
+macro_rules! define_state {
+	// Newtype state over a `NegotiationContext` that also implements `SentMsgState`.
+	(SENT_MSG_STATE, $state: ident, $doc: expr) => {
+		define_state!($state, NegotiationContext, $doc);
+		impl SentMsgState for $state {
+			fn into_negotiation_context(self) -> NegotiationContext {
+				self.0
+			}
+		}
+	};
+	// Newtype state over a `NegotiationContext` that also implements `ReceivedMsgState`.
+	(RECEIVED_MSG_STATE, $state: ident, $doc: expr) => {
+		define_state!($state, NegotiationContext, $doc);
+		impl ReceivedMsgState for $state {
+			fn into_negotiation_context(self) -> NegotiationContext {
+				self.0
+			}
+		}
+	};
+	// Base case: a documented newtype over `$inner` implementing only `State`.
+	($state: ident, $inner: ident, $doc: expr) => {
+		#[doc = $doc]
+		#[derive(Debug)]
+		struct $state($inner);
+		impl State for $state {}
+	};
+}
+
+// The concrete negotiation states. `NegotiationComplete` and `NegotiationAborted` are terminal
+// and wrap the negotiation's outcome rather than a `NegotiationContext`.
+define_state!(
+	SENT_MSG_STATE,
+	SentChangeMsg,
+	"We have sent a message to the counterparty that has affected our negotiation state."
+);
+define_state!(
+	SENT_MSG_STATE,
+	SentTxComplete,
+	"We have sent a `tx_complete` message and are awaiting the counterparty's."
+);
+define_state!(
+	RECEIVED_MSG_STATE,
+	ReceivedChangeMsg,
+	"We have received a message from the counterparty that has affected our negotiation state."
+);
+define_state!(
+	RECEIVED_MSG_STATE,
+	ReceivedTxComplete,
+	"We have received a `tx_complete` message and the counterparty is awaiting ours."
+);
+define_state!(
+	NegotiationComplete,
+	ConstructedTransaction,
+	"We have exchanged consecutive `tx_complete` messages with the counterparty and the transaction negotiation is complete."
+);
+define_state!(
+	NegotiationAborted,
+	AbortReason,
+	"The negotiation has failed and cannot be continued."
+);
+
+/// The outcome of attempting a state transition: the next state, or the reason the negotiation
+/// must be aborted.
+type StateTransitionResult<S> = Result<S, AbortReason>;
+
+/// A single legal transition: consuming the current state with `TransitionData` (typically a
+/// reference to a wire message) yields `NewState`.
+trait StateTransition<NewState: State, TransitionData> {
+	fn transition(self, data: TransitionData) -> StateTransitionResult<NewState>;
+}
+
+// This macro helps define the legal transitions between the states above by implementing
+// the `StateTransition` trait for each of the states that follow this declaration.
+macro_rules! define_state_transitions {
+	// From any `SentMsgState`, receiving one of the listed change messages applies the matching
+	// `NegotiationContext` handler and lands us in `ReceivedChangeMsg`.
+	(SENT_MSG_STATE, [$(DATA $data: ty, TRANSITION $transition: ident),+]) => {
+		$(
+			impl<S: SentMsgState> StateTransition<ReceivedChangeMsg, $data> for S {
+				fn transition(self, data: $data) -> StateTransitionResult<ReceivedChangeMsg> {
+					let mut context = self.into_negotiation_context();
+					context.$transition(data)?;
+					Ok(ReceivedChangeMsg(context))
+				}
+			}
+		)*
+	};
+	// From any `ReceivedMsgState`, sending one of the listed change messages lands us in
+	// `SentChangeMsg`.
+	(RECEIVED_MSG_STATE, [$(DATA $data: ty, TRANSITION $transition: ident),+]) => {
+		$(
+			impl<S: ReceivedMsgState> StateTransition<SentChangeMsg, $data> for S {
+				fn transition(self, data: $data) -> StateTransitionResult<SentChangeMsg> {
+					let mut context = self.into_negotiation_context();
+					context.$transition(data)?;
+					Ok(SentChangeMsg(context))
+				}
+			}
+		)*
+	};
+	// `tx_complete` handling: a second consecutive `tx_complete` (from `$tx_complete_state`)
+	// validates and finalizes the transaction; a first one (from `$from_state`) merely parks us
+	// in `$tx_complete_state`.
+	(TX_COMPLETE, $from_state: ident, $tx_complete_state: ident) => {
+		impl StateTransition<NegotiationComplete, &msgs::TxComplete> for $tx_complete_state {
+			fn transition(self, _data: &msgs::TxComplete) -> StateTransitionResult<NegotiationComplete> {
+				let context = self.into_negotiation_context();
+				let tx = context.validate_tx()?;
+				Ok(NegotiationComplete(tx))
+			}
+		}
+
+		impl StateTransition<$tx_complete_state, &msgs::TxComplete> for $from_state {
+			fn transition(self, _data: &msgs::TxComplete) -> StateTransitionResult<$tx_complete_state> {
+				Ok($tx_complete_state(self.into_negotiation_context()))
+			}
+		}
+	};
+}
+
+// State transitions when we have sent our counterparty some messages and are waiting for them
+// to respond.
+define_state_transitions!(SENT_MSG_STATE, [
+	DATA &msgs::TxAddInput, TRANSITION received_tx_add_input,
+	DATA &msgs::TxRemoveInput, TRANSITION received_tx_remove_input,
+	DATA &msgs::TxAddOutput, TRANSITION received_tx_add_output,
+	DATA &msgs::TxRemoveOutput, TRANSITION received_tx_remove_output
+]);
+// State transitions when we have received some messages from our counterparty and we should
+// respond.
+define_state_transitions!(RECEIVED_MSG_STATE, [
+	DATA &msgs::TxAddInput, TRANSITION sent_tx_add_input,
+	DATA &msgs::TxRemoveInput, TRANSITION sent_tx_remove_input,
+	DATA &msgs::TxAddOutput, TRANSITION sent_tx_add_output,
+	DATA &msgs::TxRemoveOutput, TRANSITION sent_tx_remove_output
+]);
+// Terminal transitions: exchanging consecutive `tx_complete`s concludes the negotiation.
+define_state_transitions!(TX_COMPLETE, SentChangeMsg, ReceivedTxComplete);
+define_state_transitions!(TX_COMPLETE, ReceivedChangeMsg, SentTxComplete);
+
+/// Tracks which negotiation state we are currently in.
+#[derive(Debug)]
+enum StateMachine {
+	// Transient placeholder left behind while a transition is in flight (the transitions consume
+	// the machine by value via `mem::take`); also the `Default` value.
+	Indeterminate,
+	SentChangeMsg(SentChangeMsg),
+	ReceivedChangeMsg(ReceivedChangeMsg),
+	SentTxComplete(SentTxComplete),
+	ReceivedTxComplete(ReceivedTxComplete),
+	NegotiationComplete(NegotiationComplete),
+	NegotiationAborted(NegotiationAborted),
+}
+
+impl Default for StateMachine {
+	// `Indeterminate` is the placeholder left behind when the machine is `mem::take`n.
+	fn default() -> Self {
+		Self::Indeterminate
+	}
+}
+
+// The `StateMachine` internally executes the actual transition between two states and keeps
+// track of the current state. This macro defines _how_ those state transitions happen to
+// update the internal state.
+macro_rules! define_state_machine_transitions {
+	($transition: ident, $msg: ty, [$(FROM $from_state: ident, TO $to_state: ident),+]) => {
+		fn $transition(self, msg: $msg) -> StateMachine {
+			match self {
+				$(
+					// A failed inner transition collapses the machine into `NegotiationAborted`.
+					Self::$from_state(s) => match s.transition(msg) {
+						Ok(new_state) => StateMachine::$to_state(new_state),
+						Err(abort_reason) => StateMachine::NegotiationAborted(NegotiationAborted(abort_reason)),
+					}
+				)*
+				// Receiving this message in any other state is a protocol violation.
+				_ => StateMachine::NegotiationAborted(NegotiationAborted(AbortReason::UnexpectedCounterpartyMessage)),
+			}
+		}
+	};
+}
+
+impl StateMachine {
+	/// Starts a fresh negotiation. The initiator begins in `ReceivedChangeMsg` so that it may
+	/// immediately send the first message; the non-initiator begins in `SentChangeMsg`, i.e.
+	/// waiting on the initiator.
+	fn new(feerate_sat_per_kw: u32, is_initiator: bool, tx_locktime: AbsoluteLockTime) -> Self {
+		let context = NegotiationContext {
+			tx_locktime,
+			holder_is_initiator: is_initiator,
+			received_tx_add_input_count: 0,
+			received_tx_add_output_count: 0,
+			inputs: new_hash_map(),
+			prevtx_outpoints: new_hash_set(),
+			outputs: new_hash_map(),
+			feerate_sat_per_kw,
+		};
+		if is_initiator {
+			Self::ReceivedChangeMsg(ReceivedChangeMsg(context))
+		} else {
+			Self::SentChangeMsg(SentChangeMsg(context))
+		}
+	}
+
+	// TxAddInput
+	define_state_machine_transitions!(sent_tx_add_input, &msgs::TxAddInput, [
+		FROM ReceivedChangeMsg, TO SentChangeMsg,
+		FROM ReceivedTxComplete, TO SentChangeMsg
+	]);
+	define_state_machine_transitions!(received_tx_add_input, &msgs::TxAddInput, [
+		FROM SentChangeMsg, TO ReceivedChangeMsg,
+		FROM SentTxComplete, TO ReceivedChangeMsg
+	]);
+
+	// TxAddOutput
+	define_state_machine_transitions!(sent_tx_add_output, &msgs::TxAddOutput, [
+		FROM ReceivedChangeMsg, TO SentChangeMsg,
+		FROM ReceivedTxComplete, TO SentChangeMsg
+	]);
+	define_state_machine_transitions!(received_tx_add_output, &msgs::TxAddOutput, [
+		FROM SentChangeMsg, TO ReceivedChangeMsg,
+		FROM SentTxComplete, TO ReceivedChangeMsg
+	]);
+
+	// TxRemoveInput
+	define_state_machine_transitions!(sent_tx_remove_input, &msgs::TxRemoveInput, [
+		FROM ReceivedChangeMsg, TO SentChangeMsg,
+		FROM ReceivedTxComplete, TO SentChangeMsg
+	]);
+	define_state_machine_transitions!(received_tx_remove_input, &msgs::TxRemoveInput, [
+		FROM SentChangeMsg, TO ReceivedChangeMsg,
+		FROM SentTxComplete, TO ReceivedChangeMsg
+	]);
+
+	// TxRemoveOutput
+	define_state_machine_transitions!(sent_tx_remove_output, &msgs::TxRemoveOutput, [
+		FROM ReceivedChangeMsg, TO SentChangeMsg,
+		FROM ReceivedTxComplete, TO SentChangeMsg
+	]);
+	define_state_machine_transitions!(received_tx_remove_output, &msgs::TxRemoveOutput, [
+		FROM SentChangeMsg, TO ReceivedChangeMsg,
+		FROM SentTxComplete, TO ReceivedChangeMsg
+	]);
+
+	// TxComplete: a `tx_complete` after the counterparty's own `tx_complete` (or vice versa)
+	// finalizes the negotiation; otherwise it only parks the sender in a `TxComplete` state.
+	define_state_machine_transitions!(sent_tx_complete, &msgs::TxComplete, [
+		FROM ReceivedChangeMsg, TO SentTxComplete,
+		FROM ReceivedTxComplete, TO NegotiationComplete
+	]);
+	define_state_machine_transitions!(received_tx_complete, &msgs::TxComplete, [
+		FROM SentChangeMsg, TO ReceivedTxComplete,
+		FROM SentTxComplete, TO NegotiationComplete
+	]);
+}
+
+/// Drives a single interactive transaction construction negotiation with a counterparty.
+pub(crate) struct InteractiveTxConstructor {
+	// The current negotiation state.
+	state_machine: StateMachine,
+	// The channel this negotiation belongs to; echoed in every message we send.
+	channel_id: ChannelId,
+	// Inputs we still intend to contribute, sorted by random serial id and popped from the back.
+	inputs_to_contribute: Vec<(SerialId, TxIn, TransactionU16LenLimited)>,
+	// Outputs we still intend to contribute, sorted by random serial id and popped from the back.
+	outputs_to_contribute: Vec<(SerialId, TxOut)>,
+}
+
+/// A wrapped interactive-tx message the holder must send to the counterparty next.
+pub(crate) enum InteractiveTxMessageSend {
+	TxAddInput(msgs::TxAddInput),
+	TxAddOutput(msgs::TxAddOutput),
+	TxComplete(msgs::TxComplete),
+}
+
+// This macro executes a state machine transition based on a provided action.
+macro_rules! do_state_transition {
+	($self: ident, $transition: ident, $msg: expr) => {{
+		// Take ownership of the machine (leaving `Indeterminate` behind) so the by-value
+		// transition can consume it, then store the resulting state back in place.
+		let state_machine = core::mem::take(&mut $self.state_machine);
+		$self.state_machine = state_machine.$transition($msg);
+		match &$self.state_machine {
+			StateMachine::NegotiationAborted(state) => Err(state.0.clone()),
+			_ => Ok(()),
+		}
+	}};
+}
+
+/// Draws a random serial id whose parity matches the holder's role
+/// (see `SerialId::is_for_initiator`).
+fn generate_holder_serial_id<ES: Deref>(entropy_source: &ES, is_initiator: bool) -> SerialId
+where
+	ES::Target: EntropySource,
+{
+	let entropy = entropy_source.get_secure_random_bytes();
+	let mut id_bytes = [0u8; 8];
+	id_bytes.copy_from_slice(&entropy[..8]);
+	let candidate = u64::from_be_bytes(id_bytes);
+	// Flip the lowest bit iff the drawn id's parity signals the wrong role.
+	candidate ^ u64::from(candidate.is_for_initiator() != is_initiator)
+}
+
+/// Outcome of handling a counterparty `tx_complete`.
+pub(crate) enum HandleTxCompleteValue {
+	/// We responded with a change message; the negotiation continues.
+	SendTxMessage(InteractiveTxMessageSend),
+	/// We responded with our own `tx_complete`, concluding the negotiation.
+	SendTxComplete(InteractiveTxMessageSend, ConstructedTransaction),
+	/// The counterparty's `tx_complete` concluded the negotiation; nothing left to send.
+	NegotiationComplete(ConstructedTransaction),
+}
+
+impl InteractiveTxConstructor {
+	/// Instantiates a new `InteractiveTxConstructor`.
+	///
+	/// A tuple is returned containing the newly instantiated `InteractiveTxConstructor` and
+	/// optionally an initial wrapped `Tx_` message which the holder needs to send to the
+	/// counterparty.
+	pub fn new<ES: Deref>(
+		entropy_source: &ES, channel_id: ChannelId, feerate_sat_per_kw: u32, is_initiator: bool,
+		funding_tx_locktime: AbsoluteLockTime,
+		inputs_to_contribute: Vec<(TxIn, TransactionU16LenLimited)>,
+		outputs_to_contribute: Vec<TxOut>,
+	) -> (Self, Option<InteractiveTxMessageSend>)
+	where
+		ES::Target: EntropySource,
+	{
+		let state_machine =
+			StateMachine::new(feerate_sat_per_kw, is_initiator, funding_tx_locktime);
+		// Tag every input we intend to contribute with a fresh random serial id.
+		let mut inputs_to_contribute: Vec<(SerialId, TxIn, TransactionU16LenLimited)> =
+			inputs_to_contribute
+				.into_iter()
+				.map(|(input, tx)| {
+					let serial_id = generate_holder_serial_id(entropy_source, is_initiator);
+					(serial_id, input, tx)
+				})
+				.collect();
+		// We'll sort by the randomly generated serial IDs, effectively shuffling the order of the inputs
+		// as the user passed them to us to avoid leaking any potential categorization of transactions
+		// before we pass any of the inputs to the counterparty.
+		inputs_to_contribute.sort_unstable_by_key(|(serial_id, _, _)| *serial_id);
+		let mut outputs_to_contribute: Vec<(SerialId, TxOut)> = outputs_to_contribute
+			.into_iter()
+			.map(|output| {
+				let serial_id = generate_holder_serial_id(entropy_source, is_initiator);
+				(serial_id, output)
+			})
+			.collect();
+		// In the same manner and for the same rationale as the inputs above, we'll shuffle the outputs.
+		outputs_to_contribute.sort_unstable_by_key(|(serial_id, _)| *serial_id);
+		let mut constructor =
+			Self { state_machine, channel_id, inputs_to_contribute, outputs_to_contribute };
+		// The initiator speaks first, so kick the state machine off with its first message.
+		let message_send = if is_initiator {
+			match constructor.maybe_send_message() {
+				Ok(msg_send) => Some(msg_send),
+				Err(_) => {
+					debug_assert!(
+						false,
+						"We should always be able to start our state machine successfully"
+					);
+					None
+				},
+			}
+		} else {
+			None
+		};
+		(constructor, message_send)
+	}
+
+	/// Produces our next outbound message — the next queued `tx_add_input`, else the next queued
+	/// `tx_add_output`, else `tx_complete` — and advances the state machine accordingly.
+	fn maybe_send_message(&mut self) -> Result<InteractiveTxMessageSend, AbortReason> {
+		// We first attempt to send inputs we want to add, then outputs. Once we are done sending
+		// them both, then we always send tx_complete.
+		if let Some((serial_id, input, prevtx)) = self.inputs_to_contribute.pop() {
+			let msg = msgs::TxAddInput {
+				channel_id: self.channel_id,
+				serial_id,
+				prevtx,
+				prevtx_out: input.previous_output.vout,
+				sequence: input.sequence.to_consensus_u32(),
+			};
+			do_state_transition!(self, sent_tx_add_input, &msg)?;
+			Ok(InteractiveTxMessageSend::TxAddInput(msg))
+		} else if let Some((serial_id, output)) = self.outputs_to_contribute.pop() {
+			let msg = msgs::TxAddOutput {
+				channel_id: self.channel_id,
+				serial_id,
+				sats: output.value,
+				script: output.script_pubkey,
+			};
+			do_state_transition!(self, sent_tx_add_output, &msg)?;
+			Ok(InteractiveTxMessageSend::TxAddOutput(msg))
+		} else {
+			let msg = msgs::TxComplete { channel_id: self.channel_id };
+			do_state_transition!(self, sent_tx_complete, &msg)?;
+			Ok(InteractiveTxMessageSend::TxComplete(msg))
+		}
+	}
+
+	/// Handles a counterparty `tx_add_input`, returning our next message to send.
+	pub fn handle_tx_add_input(
+		&mut self, msg: &msgs::TxAddInput,
+	) -> Result<InteractiveTxMessageSend, AbortReason> {
+		do_state_transition!(self, received_tx_add_input, msg)?;
+		self.maybe_send_message()
+	}
+
+	/// Handles a counterparty `tx_remove_input`, returning our next message to send.
+	pub fn handle_tx_remove_input(
+		&mut self, msg: &msgs::TxRemoveInput,
+	) -> Result<InteractiveTxMessageSend, AbortReason> {
+		do_state_transition!(self, received_tx_remove_input, msg)?;
+		self.maybe_send_message()
+	}
+
+	/// Handles a counterparty `tx_add_output`, returning our next message to send.
+	pub fn handle_tx_add_output(
+		&mut self, msg: &msgs::TxAddOutput,
+	) -> Result<InteractiveTxMessageSend, AbortReason> {
+		do_state_transition!(self, received_tx_add_output, msg)?;
+		self.maybe_send_message()
+	}
+
+	/// Handles a counterparty `tx_remove_output`, returning our next message to send.
+	pub fn handle_tx_remove_output(
+		&mut self, msg: &msgs::TxRemoveOutput,
+	) -> Result<InteractiveTxMessageSend, AbortReason> {
+		do_state_transition!(self, received_tx_remove_output, msg)?;
+		self.maybe_send_message()
+	}
+
+	/// Handles a counterparty `tx_complete`, either continuing the negotiation (if we still have
+	/// contributions queued) or concluding it with the constructed transaction.
+	pub fn handle_tx_complete(
+		&mut self, msg: &msgs::TxComplete,
+	) -> Result<HandleTxCompleteValue, AbortReason> {
+		do_state_transition!(self, received_tx_complete, msg)?;
+		match &self.state_machine {
+			StateMachine::ReceivedTxComplete(_) => {
+				let msg_send = self.maybe_send_message()?;
+				match &self.state_machine {
+					StateMachine::NegotiationComplete(s) => {
+						Ok(HandleTxCompleteValue::SendTxComplete(msg_send, s.0.clone()))
+					},
+					StateMachine::SentChangeMsg(_) => {
+						Ok(HandleTxCompleteValue::SendTxMessage(msg_send))
+					}, // We either had an input or output to contribute.
+					_ => {
+						debug_assert!(false, "We cannot transition to any other states after receiving `tx_complete` and responding");
+						Err(AbortReason::InvalidStateTransition)
+					},
+				}
+			},
+			StateMachine::NegotiationComplete(s) => {
+				Ok(HandleTxCompleteValue::NegotiationComplete(s.0.clone()))
+			},
+			_ => {
+				debug_assert!(
+					false,
+					"We cannot transition to any other states after receiving `tx_complete`"
+				);
+				Err(AbortReason::InvalidStateTransition)
+			},
+		}
+	}
+}
+
+#[cfg(test)]
+mod tests {
+ use crate::chain::chaininterface::{fee_for_weight, FEERATE_FLOOR_SATS_PER_KW};
+ use crate::ln::channel::TOTAL_BITCOIN_SUPPLY_SATOSHIS;
+ use crate::ln::interactivetxs::{
+ generate_holder_serial_id, AbortReason, HandleTxCompleteValue, InteractiveTxConstructor,
+ InteractiveTxMessageSend, MAX_INPUTS_OUTPUTS_COUNT, MAX_RECEIVED_TX_ADD_INPUT_COUNT,
+ MAX_RECEIVED_TX_ADD_OUTPUT_COUNT,
+ };
+ use crate::ln::ChannelId;
+ use crate::sign::EntropySource;
+ use crate::util::atomic_counter::AtomicCounter;
+ use crate::util::ser::TransactionU16LenLimited;
+ use bitcoin::blockdata::opcodes;
+ use bitcoin::blockdata::script::Builder;
+ use bitcoin::hashes::Hash;
+ use bitcoin::key::UntweakedPublicKey;
+ use bitcoin::secp256k1::{KeyPair, Secp256k1};
+ use bitcoin::{
+ absolute::LockTime as AbsoluteLockTime, OutPoint, Sequence, Transaction, TxIn, TxOut,
+ };
+ use bitcoin::{PubkeyHash, ScriptBuf, WPubkeyHash, WScriptHash};
+ use core::ops::Deref;
+
+ use super::{
+ get_output_weight, P2TR_INPUT_WEIGHT_LOWER_BOUND, P2WPKH_INPUT_WEIGHT_LOWER_BOUND,
+ P2WSH_INPUT_WEIGHT_LOWER_BOUND, TX_COMMON_FIELDS_WEIGHT,
+ };
+
+	// Feerate used throughout these tests: 10x the floor rate.
+	const TEST_FEERATE_SATS_PER_KW: u32 = FEERATE_FLOOR_SATS_PER_KW * 10;
+
+	// A simple entropy source that works based on an atomic counter.
+	struct TestEntropySource(AtomicCounter);
+	impl EntropySource for TestEntropySource {
+		// Deterministic but distinct on every call, so derived serial ids don't collide.
+		fn get_secure_random_bytes(&self) -> [u8; 32] {
+			let mut res = [0u8; 32];
+			let increment = self.0.get_increment();
+			for i in 0..32 {
+				// Rotate the increment value by 'i' bits to the right, to avoid clashes
+				// when `generate_local_serial_id` does a parity flip on consecutive calls for the
+				// same party.
+				let rotated_increment = increment.rotate_right(i as u32);
+				res[i] = (rotated_increment & 0xff) as u8;
+			}
+			res
+		}
+	}
+
+	// An entropy source that deliberately returns you the same seed every time. We use this
+	// to test if the constructor would catch inputs/outputs that are attempting to be added
+	// with duplicate serial ids.
+	struct DuplicateEntropySource;
+	impl EntropySource for DuplicateEntropySource {
+		fn get_secure_random_bytes(&self) -> [u8; 32] {
+			// Constant first eight bytes => every derived serial id is identical.
+			let mut res = [0u8; 32];
+			let count = 1u64;
+			res[0..8].copy_from_slice(&count.to_be_bytes());
+			res
+		}
+	}
+
+	// Which party is expected to surface the abort error in a failing test session.
+	#[derive(Debug, PartialEq, Eq)]
+	enum ErrorCulprit {
+		NodeA,
+		NodeB,
+		// Some error values are only checked at the end of the negotiation and are not easy to attribute
+		// to a particular party. Both parties would indicate an `AbortReason` in this case.
+		// e.g. Exceeded max inputs and outputs after negotiation.
+		Indeterminate,
+	}
+
+	// One negotiation scenario: both parties' contributions plus the expected outcome.
+	struct TestSession {
+		description: &'static str,
+		inputs_a: Vec<(TxIn, TransactionU16LenLimited)>,
+		outputs_a: Vec<TxOut>,
+		inputs_b: Vec<(TxIn, TransactionU16LenLimited)>,
+		outputs_b: Vec<TxOut>,
+		// `None` when the negotiation is expected to succeed.
+		expect_error: Option<(AbortReason, ErrorCulprit)>,
+	}
+
+	// Runs a test session with the standard counter-based entropy source.
+	fn do_test_interactive_tx_constructor(session: TestSession) {
+		let entropy_source = TestEntropySource(AtomicCounter::new());
+		do_test_interactive_tx_constructor_internal(session, &&entropy_source);
+	}
+
+	// Runs a test session with a caller-provided entropy source (e.g. `DuplicateEntropySource`).
+	fn do_test_interactive_tx_constructor_with_entropy_source<ES: Deref>(
+		session: TestSession, entropy_source: ES,
+	) where
+		ES::Target: EntropySource,
+	{
+		do_test_interactive_tx_constructor_internal(session, &entropy_source);
+	}
+
+	// Drives a full negotiation between constructor A (initiator) and constructor B
+	// (non-initiator), relaying each side's messages to the other until both finish or one
+	// aborts, then checks the outcome against `session.expect_error`.
+	fn do_test_interactive_tx_constructor_internal<ES: Deref>(
+		session: TestSession, entropy_source: &ES,
+	) where
+		ES::Target: EntropySource,
+	{
+		let channel_id = ChannelId(entropy_source.get_secure_random_bytes());
+		let tx_locktime = AbsoluteLockTime::from_height(1337).unwrap();
+
+		let (mut constructor_a, first_message_a) = InteractiveTxConstructor::new(
+			entropy_source,
+			channel_id,
+			TEST_FEERATE_SATS_PER_KW,
+			true,
+			tx_locktime,
+			session.inputs_a,
+			session.outputs_a,
+		);
+		let (mut constructor_b, first_message_b) = InteractiveTxConstructor::new(
+			entropy_source,
+			channel_id,
+			TEST_FEERATE_SATS_PER_KW,
+			false,
+			tx_locktime,
+			session.inputs_b,
+			session.outputs_b,
+		);
+
+		// Feeds `msg` into `for_constructor` and returns (next message to relay, final tx if
+		// that side's negotiation just concluded).
+		let handle_message_send =
+			|msg: InteractiveTxMessageSend, for_constructor: &mut InteractiveTxConstructor| {
+				match msg {
+					InteractiveTxMessageSend::TxAddInput(msg) => for_constructor
+						.handle_tx_add_input(&msg)
+						.map(|msg_send| (Some(msg_send), None)),
+					InteractiveTxMessageSend::TxAddOutput(msg) => for_constructor
+						.handle_tx_add_output(&msg)
+						.map(|msg_send| (Some(msg_send), None)),
+					InteractiveTxMessageSend::TxComplete(msg) => {
+						for_constructor.handle_tx_complete(&msg).map(|value| match value {
+							HandleTxCompleteValue::SendTxMessage(msg_send) => {
+								(Some(msg_send), None)
+							},
+							HandleTxCompleteValue::SendTxComplete(msg_send, tx) => {
+								(Some(msg_send), Some(tx))
+							},
+							HandleTxCompleteValue::NegotiationComplete(tx) => (None, Some(tx)),
+						})
+					},
+				}
+			};
+
+		// Only the initiator speaks first.
+		assert!(first_message_b.is_none());
+		let mut message_send_a = first_message_a;
+		let mut message_send_b = None;
+		let mut final_tx_a = None;
+		let mut final_tx_b = None;
+		// Ping-pong messages between the two constructors until both hold a final tx or one errors.
+		while final_tx_a.is_none() || final_tx_b.is_none() {
+			if let Some(message_send_a) = message_send_a.take() {
+				match handle_message_send(message_send_a, &mut constructor_b) {
+					Ok((msg_send, final_tx)) => {
+						message_send_b = msg_send;
+						final_tx_b = final_tx;
+					},
+					Err(abort_reason) => {
+						let error_culprit = match abort_reason {
+							AbortReason::ExceededNumberOfInputsOrOutputs => {
+								ErrorCulprit::Indeterminate
+							},
+							_ => ErrorCulprit::NodeA,
+						};
+						assert_eq!(
+							Some((abort_reason, error_culprit)),
+							session.expect_error,
+							"Test: {}",
+							session.description
+						);
+						assert!(message_send_b.is_none());
+						return;
+					},
+				}
+			}
+			if let Some(message_send_b) = message_send_b.take() {
+				match handle_message_send(message_send_b, &mut constructor_a) {
+					Ok((msg_send, final_tx)) => {
+						message_send_a = msg_send;
+						final_tx_a = final_tx;
+					},
+					Err(abort_reason) => {
+						let error_culprit = match abort_reason {
+							AbortReason::ExceededNumberOfInputsOrOutputs => {
+								ErrorCulprit::Indeterminate
+							},
+							_ => ErrorCulprit::NodeB,
+						};
+						assert_eq!(
+							Some((abort_reason, error_culprit)),
+							session.expect_error,
+							"Test: {}",
+							session.description
+						);
+						assert!(message_send_a.is_none());
+						return;
+					},
+				}
+			}
+		}
+		assert!(message_send_a.is_none());
+		assert!(message_send_b.is_none());
+		// Both sides must have constructed identical unsigned transactions.
+		assert_eq!(final_tx_a.unwrap().into_unsigned_tx(), final_tx_b.unwrap().into_unsigned_tx());
+		assert!(session.expect_error.is_none(), "Test: {}", session.description);
+	}
+
+	// Output kinds (carrying a value in sats) used to populate test prevtxs and contributions.
+	#[derive(Debug, Clone, Copy)]
+	enum TestOutput {
+		P2WPKH(u64),
+		P2WSH(u64),
+		P2TR(u64),
+		// Non-witness type to test rejection.
+		P2PKH(u64),
+	}
+
+	// Builds a prevtx containing `outputs` at a fixed locktime.
+	fn generate_tx(outputs: &[TestOutput]) -> Transaction {
+		generate_tx_with_locktime(outputs, 1337)
+	}
+
+	// Maps a `TestOutput` to a concrete `TxOut` with the matching script-pubkey kind.
+	fn generate_txout(output: &TestOutput) -> TxOut {
+		let secp_ctx = Secp256k1::new();
+		let (value, script_pubkey) = match output {
+			TestOutput::P2WPKH(value) => {
+				(*value, ScriptBuf::new_v0_p2wpkh(&WPubkeyHash::from_slice(&[1; 20]).unwrap()))
+			},
+			TestOutput::P2WSH(value) => {
+				(*value, ScriptBuf::new_v0_p2wsh(&WScriptHash::from_slice(&[2; 32]).unwrap()))
+			},
+			TestOutput::P2TR(value) => (
+				*value,
+				ScriptBuf::new_v1_p2tr(
+					&secp_ctx,
+					UntweakedPublicKey::from_keypair(
+						&KeyPair::from_seckey_slice(&secp_ctx, &[3; 32]).unwrap(),
+					)
+					.0,
+					None,
+				),
+			),
+			TestOutput::P2PKH(value) => {
+				(*value, ScriptBuf::new_p2pkh(&PubkeyHash::from_slice(&[4; 20]).unwrap()))
+			},
+		};
+
+		TxOut { value, script_pubkey }
+	}
+
+	// Builds a minimal version-2 transaction: one dummy input, the requested outputs, and the
+	// given locktime (callers vary the locktime to make txids unique).
+	fn generate_tx_with_locktime(outputs: &[TestOutput], locktime: u32) -> Transaction {
+		Transaction {
+			version: 2,
+			lock_time: AbsoluteLockTime::from_height(locktime).unwrap(),
+			input: vec![TxIn { ..Default::default() }],
+			output: outputs.iter().map(generate_txout).collect(),
+		}
+	}
+
+	// Builds one prevtx containing `outputs` and returns, for each of its outputs, a spending
+	// `TxIn` paired with the (length-limited) prevtx itself.
+	fn generate_inputs(outputs: &[TestOutput]) -> Vec<(TxIn, TransactionU16LenLimited)> {
+		let prev_tx = generate_tx(outputs);
+		let prev_txid = prev_tx.txid();
+		let wrapped_prev_tx = TransactionU16LenLimited::new(prev_tx.clone()).unwrap();
+		(0..prev_tx.output.len())
+			.map(|vout| {
+				let txin = TxIn {
+					previous_output: OutPoint { txid: prev_txid, vout: vout as u32 },
+					script_sig: Default::default(),
+					sequence: Sequence::ENABLE_RBF_NO_LOCKTIME,
+					witness: Default::default(),
+				};
+				(txin, wrapped_prev_tx.clone())
+			})
+			.collect()
+	}
+
+	// Script-pubkey and output helpers used to build test contributions.
+	fn generate_p2wsh_script_pubkey() -> ScriptBuf {
+		Builder::new().push_opcode(opcodes::OP_TRUE).into_script().to_v0_p2wsh()
+	}
+
+	fn generate_p2wpkh_script_pubkey() -> ScriptBuf {
+		ScriptBuf::new_v0_p2wpkh(&WPubkeyHash::from_slice(&[1; 20]).unwrap())
+	}
+
+	fn generate_outputs(outputs: &[TestOutput]) -> Vec<TxOut> {
+		outputs.iter().map(generate_txout).collect()
+	}
+
+	fn generate_fixed_number_of_inputs(count: u16) -> Vec<(TxIn, TransactionU16LenLimited)> {
+		// Generate transactions with a total `count` number of outputs such that no transaction has a
+		// serialized length greater than u16::MAX.
+		let max_outputs_per_prevtx = 1_500;
+		let mut remaining = count;
+		let mut inputs: Vec<(TxIn, TransactionU16LenLimited)> = Vec::with_capacity(count as usize);
+
+		while remaining > 0 {
+			let tx_output_count = remaining.min(max_outputs_per_prevtx);
+			remaining -= tx_output_count;
+
+			// Use unique locktime for each tx so outpoints are different across transactions
+			let tx = generate_tx_with_locktime(
+				&vec![TestOutput::P2WPKH(1_000_000); tx_output_count as usize],
+				(1337 + remaining).into(),
+			);
+			let txid = tx.txid();
+
+			// One spending input per output of the prevtx just built.
+			let mut temp: Vec<(TxIn, TransactionU16LenLimited)> = tx
+				.output
+				.iter()
+				.enumerate()
+				.map(|(idx, _)| {
+					let input = TxIn {
+						previous_output: OutPoint { txid, vout: idx as u32 },
+						script_sig: Default::default(),
+						sequence: Sequence::ENABLE_RBF_NO_LOCKTIME,
+						witness: Default::default(),
+					};
+					(input, TransactionU16LenLimited::new(tx.clone()).unwrap())
+				})
+				.collect();
+
+			inputs.append(&mut temp);
+		}
+
+		inputs
+	}
+
+	fn generate_fixed_number_of_outputs(count: u16) -> Vec<TxOut> {
+		// Set a constant value for each TxOut
+		generate_outputs(&vec![TestOutput::P2WPKH(1_000_000); count as usize])
+	}
+
+	// P2SH (non-witness) script, used to exercise rejection of non-witness outputs.
+	fn generate_p2sh_script_pubkey() -> ScriptBuf {
+		Builder::new().push_opcode(opcodes::OP_TRUE).into_script().to_p2sh()
+	}
+
+	fn generate_non_witness_output(value: u64) -> TxOut {
+		TxOut { value, script_pubkey: generate_p2sh_script_pubkey() }
+	}
+
+ #[test]
+ fn test_interactive_tx_constructor() {
+ do_test_interactive_tx_constructor(TestSession {
+ description: "No contributions",
+ inputs_a: vec![],
+ outputs_a: vec![],
+ inputs_b: vec![],
+ outputs_b: vec![],
+ expect_error: Some((AbortReason::InsufficientFees, ErrorCulprit::NodeA)),
+ });
+ do_test_interactive_tx_constructor(TestSession {
+ description: "Single contribution, no initiator inputs",
+ inputs_a: vec![],
+ outputs_a: generate_outputs(&[TestOutput::P2WPKH(1_000_000)]),
+ inputs_b: vec![],
+ outputs_b: vec![],
+ expect_error: Some((AbortReason::OutputsValueExceedsInputsValue, ErrorCulprit::NodeA)),
+ });
+ do_test_interactive_tx_constructor(TestSession {
+ description: "Single contribution, no initiator outputs",
+ inputs_a: generate_inputs(&[TestOutput::P2WPKH(1_000_000)]),
+ outputs_a: vec![],
+ inputs_b: vec![],
+ outputs_b: vec![],
+ expect_error: None,
+ });
+ do_test_interactive_tx_constructor(TestSession {
+ description: "Single contribution, no fees",
+ inputs_a: generate_inputs(&[TestOutput::P2WPKH(1_000_000)]),
+ outputs_a: generate_outputs(&[TestOutput::P2WPKH(1_000_000)]),
+ inputs_b: vec![],
+ outputs_b: vec![],
+ expect_error: Some((AbortReason::InsufficientFees, ErrorCulprit::NodeA)),
+ });
+ let p2wpkh_fee = fee_for_weight(TEST_FEERATE_SATS_PER_KW, P2WPKH_INPUT_WEIGHT_LOWER_BOUND);
+ let outputs_fee = fee_for_weight(
+ TEST_FEERATE_SATS_PER_KW,
+ get_output_weight(&generate_p2wpkh_script_pubkey()).to_wu(),
+ );
+ let tx_common_fields_fee =
+ fee_for_weight(TEST_FEERATE_SATS_PER_KW, TX_COMMON_FIELDS_WEIGHT);
+ do_test_interactive_tx_constructor(TestSession {
+ description: "Single contribution, with P2WPKH input, insufficient fees",
+ inputs_a: generate_inputs(&[TestOutput::P2WPKH(1_000_000)]),
+ outputs_a: generate_outputs(&[TestOutput::P2WPKH(
+ 1_000_000 - p2wpkh_fee - outputs_fee - tx_common_fields_fee + 1, /* makes fees insuffcient for initiator */
+ )]),
+ inputs_b: vec![],
+ outputs_b: vec![],
+ expect_error: Some((AbortReason::InsufficientFees, ErrorCulprit::NodeA)),
+ });
+ do_test_interactive_tx_constructor(TestSession {
+ description: "Single contribution with P2WPKH input, sufficient fees",
+ inputs_a: generate_inputs(&[TestOutput::P2WPKH(1_000_000)]),
+ outputs_a: generate_outputs(&[TestOutput::P2WPKH(
+ 1_000_000 - p2wpkh_fee - outputs_fee - tx_common_fields_fee,
+ )]),
+ inputs_b: vec![],
+ outputs_b: vec![],
+ expect_error: None,
+ });
+ let p2wsh_fee = fee_for_weight(TEST_FEERATE_SATS_PER_KW, P2WSH_INPUT_WEIGHT_LOWER_BOUND);
+ do_test_interactive_tx_constructor(TestSession {
+ description: "Single contribution, with P2WSH input, insufficient fees",
+ inputs_a: generate_inputs(&[TestOutput::P2WSH(1_000_000)]),
+ outputs_a: generate_outputs(&[TestOutput::P2WPKH(
+ 1_000_000 - p2wsh_fee - outputs_fee - tx_common_fields_fee + 1, /* makes fees insuffcient for initiator */
+ )]),
+ inputs_b: vec![],
+ outputs_b: vec![],
+ expect_error: Some((AbortReason::InsufficientFees, ErrorCulprit::NodeA)),
+ });
+ do_test_interactive_tx_constructor(TestSession {
+ description: "Single contribution with P2WSH input, sufficient fees",
+ inputs_a: generate_inputs(&[TestOutput::P2WSH(1_000_000)]),
+ outputs_a: generate_outputs(&[TestOutput::P2WPKH(
+ 1_000_000 - p2wsh_fee - outputs_fee - tx_common_fields_fee,
+ )]),
+ inputs_b: vec![],
+ outputs_b: vec![],
+ expect_error: None,
+ });
+ let p2tr_fee = fee_for_weight(TEST_FEERATE_SATS_PER_KW, P2TR_INPUT_WEIGHT_LOWER_BOUND);
+ do_test_interactive_tx_constructor(TestSession {
+ description: "Single contribution, with P2TR input, insufficient fees",
+ inputs_a: generate_inputs(&[TestOutput::P2TR(1_000_000)]),
+ outputs_a: generate_outputs(&[TestOutput::P2WPKH(
+ 1_000_000 - p2tr_fee - outputs_fee - tx_common_fields_fee + 1, /* makes fees insuffcient for initiator */
+ )]),
+ inputs_b: vec![],
+ outputs_b: vec![],
+ expect_error: Some((AbortReason::InsufficientFees, ErrorCulprit::NodeA)),
+ });
+ do_test_interactive_tx_constructor(TestSession {
+ description: "Single contribution with P2TR input, sufficient fees",
+ inputs_a: generate_inputs(&[TestOutput::P2TR(1_000_000)]),
+ outputs_a: generate_outputs(&[TestOutput::P2WPKH(
+ 1_000_000 - p2tr_fee - outputs_fee - tx_common_fields_fee,
+ )]),
+ inputs_b: vec![],
+ outputs_b: vec![],
+ expect_error: None,
+ });
+ do_test_interactive_tx_constructor(TestSession {
+ description: "Initiator contributes sufficient fees, but non-initiator does not",
+ inputs_a: generate_inputs(&[TestOutput::P2WPKH(1_000_000)]),
+ outputs_a: vec![],
+ inputs_b: generate_inputs(&[TestOutput::P2WPKH(100_000)]),
+ outputs_b: generate_outputs(&[TestOutput::P2WPKH(100_000)]),
+ expect_error: Some((AbortReason::InsufficientFees, ErrorCulprit::NodeB)),
+ });
+ do_test_interactive_tx_constructor(TestSession {
+ description: "Multi-input-output contributions from both sides",
+ inputs_a: generate_inputs(&[TestOutput::P2WPKH(1_000_000); 2]),
+ outputs_a: generate_outputs(&[
+ TestOutput::P2WPKH(1_000_000),
+ TestOutput::P2WPKH(200_000),
+ ]),
+ inputs_b: generate_inputs(&[
+ TestOutput::P2WPKH(1_000_000),
+ TestOutput::P2WPKH(500_000),
+ ]),
+ outputs_b: generate_outputs(&[
+ TestOutput::P2WPKH(1_000_000),
+ TestOutput::P2WPKH(400_000),
+ ]),
+ expect_error: None,
+ });
+
+ do_test_interactive_tx_constructor(TestSession {
+ description: "Prevout from initiator is not a witness program",
+ inputs_a: generate_inputs(&[TestOutput::P2PKH(1_000_000)]),
+ outputs_a: vec![],
+ inputs_b: vec![],
+ outputs_b: vec![],
+ expect_error: Some((AbortReason::PrevTxOutInvalid, ErrorCulprit::NodeA)),
+ });
+
+ let tx =
+ TransactionU16LenLimited::new(generate_tx(&[TestOutput::P2WPKH(1_000_000)])).unwrap();
+ let invalid_sequence_input = TxIn {
+ previous_output: OutPoint { txid: tx.as_transaction().txid(), vout: 0 },
+ ..Default::default()
+ };
+ do_test_interactive_tx_constructor(TestSession {
+ description: "Invalid input sequence from initiator",
+ inputs_a: vec![(invalid_sequence_input, tx.clone())],
+ outputs_a: generate_outputs(&[TestOutput::P2WPKH(1_000_000)]),
+ inputs_b: vec![],
+ outputs_b: vec![],
+ expect_error: Some((AbortReason::IncorrectInputSequenceValue, ErrorCulprit::NodeA)),
+ });
+ let duplicate_input = TxIn {
+ previous_output: OutPoint { txid: tx.as_transaction().txid(), vout: 0 },
+ sequence: Sequence::ENABLE_RBF_NO_LOCKTIME,
+ ..Default::default()
+ };
+ do_test_interactive_tx_constructor(TestSession {
+ description: "Duplicate prevout from initiator",
+ inputs_a: vec![(duplicate_input.clone(), tx.clone()), (duplicate_input, tx.clone())],
+ outputs_a: generate_outputs(&[TestOutput::P2WPKH(1_000_000)]),
+ inputs_b: vec![],
+ outputs_b: vec![],
+ expect_error: Some((AbortReason::PrevTxOutInvalid, ErrorCulprit::NodeB)),
+ });
+ let duplicate_input = TxIn {
+ previous_output: OutPoint { txid: tx.as_transaction().txid(), vout: 0 },
+ sequence: Sequence::ENABLE_RBF_NO_LOCKTIME,
+ ..Default::default()
+ };
+ do_test_interactive_tx_constructor(TestSession {
+ description: "Non-initiator uses same prevout as initiator",
+ inputs_a: vec![(duplicate_input.clone(), tx.clone())],
+ outputs_a: generate_outputs(&[TestOutput::P2WPKH(1_000_000)]),
+ inputs_b: vec![(duplicate_input.clone(), tx.clone())],
+ outputs_b: vec![],
+ expect_error: Some((AbortReason::PrevTxOutInvalid, ErrorCulprit::NodeA)),
+ });
+ do_test_interactive_tx_constructor(TestSession {
+ description: "Initiator sends too many TxAddInputs",
+ inputs_a: generate_fixed_number_of_inputs(MAX_RECEIVED_TX_ADD_INPUT_COUNT + 1),
+ outputs_a: vec![],
+ inputs_b: vec![],
+ outputs_b: vec![],
+ expect_error: Some((AbortReason::ReceivedTooManyTxAddInputs, ErrorCulprit::NodeA)),
+ });
+ do_test_interactive_tx_constructor_with_entropy_source(
+ TestSession {
+ // We use a deliberately bad entropy source, `DuplicateEntropySource` to simulate this.
+ description: "Attempt to queue up two inputs with duplicate serial ids",
+ inputs_a: generate_fixed_number_of_inputs(2),
+ outputs_a: vec![],
+ inputs_b: vec![],
+ outputs_b: vec![],
+ expect_error: Some((AbortReason::DuplicateSerialId, ErrorCulprit::NodeA)),
+ },
+ &DuplicateEntropySource,
+ );
+ do_test_interactive_tx_constructor(TestSession {
+ description: "Initiator sends too many TxAddOutputs",
+ inputs_a: vec![],
+ outputs_a: generate_fixed_number_of_outputs(MAX_RECEIVED_TX_ADD_OUTPUT_COUNT + 1),
+ inputs_b: vec![],
+ outputs_b: vec![],
+ expect_error: Some((AbortReason::ReceivedTooManyTxAddOutputs, ErrorCulprit::NodeA)),
+ });
+ do_test_interactive_tx_constructor(TestSession {
+ description: "Initiator sends an output below dust value",
+ inputs_a: vec![],
+ outputs_a: generate_outputs(&[TestOutput::P2WSH(
+ generate_p2wsh_script_pubkey().dust_value().to_sat() - 1,
+ )]),
+ inputs_b: vec![],
+ outputs_b: vec![],
+ expect_error: Some((AbortReason::BelowDustLimit, ErrorCulprit::NodeA)),
+ });
+ do_test_interactive_tx_constructor(TestSession {
+ description: "Initiator sends an output above maximum sats allowed",
+ inputs_a: vec![],
+ outputs_a: generate_outputs(&[TestOutput::P2WPKH(TOTAL_BITCOIN_SUPPLY_SATOSHIS + 1)]),
+ inputs_b: vec![],
+ outputs_b: vec![],
+ expect_error: Some((AbortReason::ExceededMaximumSatsAllowed, ErrorCulprit::NodeA)),
+ });
+ do_test_interactive_tx_constructor(TestSession {
+ description: "Initiator sends an output without a witness program",
+ inputs_a: vec![],
+ outputs_a: vec![generate_non_witness_output(1_000_000)],
+ inputs_b: vec![],
+ outputs_b: vec![],
+ expect_error: Some((AbortReason::InvalidOutputScript, ErrorCulprit::NodeA)),
+ });
+ do_test_interactive_tx_constructor_with_entropy_source(
+ TestSession {
+ // We use a deliberately bad entropy source, `DuplicateEntropySource` to simulate this.
+ description: "Attempt to queue up two outputs with duplicate serial ids",
+ inputs_a: vec![],
+ outputs_a: generate_fixed_number_of_outputs(2),
+ inputs_b: vec![],
+ outputs_b: vec![],
+ expect_error: Some((AbortReason::DuplicateSerialId, ErrorCulprit::NodeA)),
+ },
+ &DuplicateEntropySource,
+ );
+
+ do_test_interactive_tx_constructor(TestSession {
+ description: "Peer contributed more output value than inputs",
+ inputs_a: generate_inputs(&[TestOutput::P2WPKH(100_000)]),
+ outputs_a: generate_outputs(&[TestOutput::P2WPKH(1_000_000)]),
+ inputs_b: vec![],
+ outputs_b: vec![],
+ expect_error: Some((AbortReason::OutputsValueExceedsInputsValue, ErrorCulprit::NodeA)),
+ });
+
+ do_test_interactive_tx_constructor(TestSession {
+ description: "Peer contributed more than allowed number of inputs",
+ inputs_a: generate_fixed_number_of_inputs(MAX_INPUTS_OUTPUTS_COUNT as u16 + 1),
+ outputs_a: vec![],
+ inputs_b: vec![],
+ outputs_b: vec![],
+ expect_error: Some((
+ AbortReason::ExceededNumberOfInputsOrOutputs,
+ ErrorCulprit::Indeterminate,
+ )),
+ });
+ do_test_interactive_tx_constructor(TestSession {
+ description: "Peer contributed more than allowed number of outputs",
+ inputs_a: generate_inputs(&[TestOutput::P2WPKH(TOTAL_BITCOIN_SUPPLY_SATOSHIS)]),
+ outputs_a: generate_fixed_number_of_outputs(MAX_INPUTS_OUTPUTS_COUNT as u16 + 1),
+ inputs_b: vec![],
+ outputs_b: vec![],
+ expect_error: Some((
+ AbortReason::ExceededNumberOfInputsOrOutputs,
+ ErrorCulprit::Indeterminate,
+ )),
+ });
+ }
+
+ #[test]
+ fn test_generate_local_serial_id() {
+ let entropy_source = TestEntropySource(AtomicCounter::new());
+
+ // Initiators should have even serial id, non-initiators should have odd serial id.
+ assert_eq!(generate_holder_serial_id(&&entropy_source, true) % 2, 0);
+ assert_eq!(generate_holder_serial_id(&&entropy_source, false) % 2, 1)
+ }
+}
#[cfg(all(test, async_signing))]
#[allow(unused_mut)]
mod async_signer_tests;
+#[cfg(test)]
+#[allow(unused_mut)]
+mod offers_tests;
+#[allow(dead_code)] // TODO(dual_funding): Exchange for dual_funding cfg
+pub(crate) mod interactivetxs;
pub use self::peer_channel_encryptor::LN_MAX_MSG_LEN;
+use bitcoin::hashes::{sha256::Hash as Sha256, Hash};
+
/// payment_hash type, use to cross-lock hop
///
/// This is not exported to bindings users as we just use [u8; 32] directly
}
}
+/// Converts a `PaymentPreimage` into a `PaymentHash` by hashing the preimage with SHA256.
+impl From<PaymentPreimage> for PaymentHash {
+ fn from(value: PaymentPreimage) -> Self {
+ PaymentHash(Sha256::hash(&value.0).to_byte_array())
+ }
+}
+
/// payment_secret type, use to authenticate sender to the receiver and tie MPP HTLCs together
///
/// This is not exported to bindings users as we just use [u8; 32] directly
#[derive(Hash, Copy, Clone, PartialEq, Eq, Debug, Ord, PartialOrd)]
pub struct PaymentSecret(pub [u8; 32]);
+#[allow(unused_imports)]
use crate::prelude::*;
+
use bitcoin::bech32;
use bitcoin::bech32::{Base32Len, FromBase32, ToBase32, WriteBase32, u5};
//! Further functional tests which test blockchain reorganizations.
-use crate::sign::{ecdsa::EcdsaChannelSigner, SpendableOutputDescriptor};
+use crate::sign::{ecdsa::EcdsaChannelSigner, OutputSpender, SpendableOutputDescriptor};
use crate::chain::channelmonitor::{ANTI_REORG_DELAY, LATENCY_GRACE_PERIOD_BLOCKS, Balance};
use crate::chain::transaction::OutPoint;
use crate::chain::chaininterface::{LowerBoundedFeeEstimator, compute_feerate_sat_per_1000_weight};
use crate::events::bump_transaction::{BumpTransactionEvent, WalletSource};
use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider, ClosureReason, HTLCDestination};
-use crate::ln::channel;
+use crate::ln::{channel, ChannelId};
use crate::ln::channelmanager::{BREAKDOWN_TIMEOUT, PaymentId, RecipientOnionFields};
use crate::ln::msgs::ChannelMessageHandler;
use crate::util::config::UserConfig;
expect_payment_failed!(nodes[1], payment_hash_1, false);
}
+#[test]
+fn archive_fully_resolved_monitors() {
+ // Test we can archive fully resolved channel monitor.
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let mut user_config = test_default_channel_config();
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(user_config), Some(user_config)]);
+ let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+ let (_, _, chan_id, funding_tx) =
+ create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 1_000_000);
+
+ nodes[0].node.close_channel(&chan_id, &nodes[1].node.get_our_node_id()).unwrap();
+ let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
+ nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_shutdown);
+ let node_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
+ nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_1_shutdown);
+
+ let node_0_closing_signed = get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id());
+ nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_closing_signed);
+ let node_1_closing_signed = get_event_msg!(nodes[1], MessageSendEvent::SendClosingSigned, nodes[0].node.get_our_node_id());
+ nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &node_1_closing_signed);
+ let (_, node_0_2nd_closing_signed) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id());
+ nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_2nd_closing_signed.unwrap());
+ let (_, _) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());
+
+ let shutdown_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+
+ mine_transaction(&nodes[0], &shutdown_tx[0]);
+ mine_transaction(&nodes[1], &shutdown_tx[0]);
+
+ connect_blocks(&nodes[0], 6);
+ connect_blocks(&nodes[1], 6);
+
+ check_closed_event!(nodes[0], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 1000000);
+ check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 1000000);
+
+ assert_eq!(nodes[0].chain_monitor.chain_monitor.list_monitors().len(), 1);
+ // First archive should set balances_empty_height to current block height
+ nodes[0].chain_monitor.chain_monitor.archive_fully_resolved_channel_monitors();
+ assert_eq!(nodes[0].chain_monitor.chain_monitor.list_monitors().len(), 1);
+ connect_blocks(&nodes[0], 4032);
+ // Second call after 4032 blocks, should archive the monitor
+ nodes[0].chain_monitor.chain_monitor.archive_fully_resolved_channel_monitors();
+ // Should have no monitors left
+ assert_eq!(nodes[0].chain_monitor.chain_monitor.list_monitors().len(), 0);
+ // Remove the corresponding outputs and transactions the chain source is
+ // watching. This is to make sure the `Drop` function assertions pass.
+ nodes.get_mut(0).unwrap().chain_source.remove_watched_txn_and_outputs(
+ OutPoint { txid: funding_tx.txid(), index: 0 },
+ funding_tx.output[0].script_pubkey.clone()
+ );
+}
+
fn do_chanmon_claim_value_coop_close(anchors: bool) {
// Tests `get_claimable_balances` returns the correct values across a simple cooperative claim.
// Specifically, this tests that the channel non-HTLC balances show up in
let (_, _, chan_id, funding_tx) =
create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 1_000_000);
let funding_outpoint = OutPoint { txid: funding_tx.txid(), index: 0 };
- assert_eq!(funding_outpoint.to_channel_id(), chan_id);
+ assert_eq!(ChannelId::v1_from_funding_outpoint(funding_outpoint), chan_id);
let chan_feerate = get_feerate!(nodes[0], nodes[1], chan_id) as u64;
let channel_type_features = get_channel_type_features!(nodes[0], nodes[1], chan_id);
assert_eq!(shutdown_tx, nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0));
assert_eq!(shutdown_tx.len(), 1);
- let shutdown_tx_conf_height_a = block_from_scid(&mine_transaction(&nodes[0], &shutdown_tx[0]));
- let shutdown_tx_conf_height_b = block_from_scid(&mine_transaction(&nodes[1], &shutdown_tx[0]));
+ let shutdown_tx_conf_height_a = block_from_scid(mine_transaction(&nodes[0], &shutdown_tx[0]));
+ let shutdown_tx_conf_height_b = block_from_scid(mine_transaction(&nodes[1], &shutdown_tx[0]));
assert!(nodes[0].node.list_channels().is_empty());
assert!(nodes[1].node.list_channels().is_empty());
spendable_outputs_b
);
- check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 1000000);
- check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 1000000);
+ check_closed_event!(nodes[0], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 1000000);
+ check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 1000000);
}
#[test]
let (_, _, chan_id, funding_tx) =
create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 1_000_000);
let funding_outpoint = OutPoint { txid: funding_tx.txid(), index: 0 };
- assert_eq!(funding_outpoint.to_channel_id(), chan_id);
+ assert_eq!(ChannelId::v1_from_funding_outpoint(funding_outpoint), chan_id);
// This HTLC is immediately claimed, giving node B the preimage
let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 3_000_000);
check_spends!(commitment_tx, funding_tx);
commitment_tx
};
- let commitment_tx_conf_height_a = block_from_scid(&mine_transaction(&nodes[0], &commitment_tx));
+ let commitment_tx_conf_height_a = block_from_scid(mine_transaction(&nodes[0], &commitment_tx));
if nodes[0].connect_style.borrow().updates_best_block_first() {
let mut txn = nodes[0].tx_broadcaster.txn_broadcast();
assert_eq!(txn.len(), 1);
let (_, _, chan_id, funding_tx) =
create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 100_000_000);
let funding_outpoint = OutPoint { txid: funding_tx.txid(), index: 0 };
- assert_eq!(funding_outpoint.to_channel_id(), chan_id);
+ assert_eq!(ChannelId::v1_from_funding_outpoint(funding_outpoint), chan_id);
// We create five HTLCs for B to claim against A's revoked commitment transaction:
//
assert!(failed_payments.is_empty());
if let Event::PendingHTLCsForwardable { .. } = events[0] {} else { panic!(); }
match &events[1] {
- Event::ChannelClosed { reason: ClosureReason::HolderForceClosed, .. } => {},
+ Event::ChannelClosed { reason: ClosureReason::HTLCsTimedOut, .. } => {},
_ => panic!(),
}
connect_blocks(&nodes[1], htlc_cltv_timeout + 1 - 10);
check_closed_broadcast!(nodes[1], true);
check_added_monitors!(nodes[1], 1);
- check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 1000000);
+ check_closed_event!(nodes[1], 1, ClosureReason::HTLCsTimedOut, [nodes[0].node.get_our_node_id()], 1000000);
// Prior to channel closure, B considers the preimage HTLC as its own, and otherwise only
// lists the two on-chain timeout-able HTLCs as claimable balances.
let (_, _, chan_id, funding_tx) =
create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 12_000_000);
let funding_outpoint = OutPoint { txid: funding_tx.txid(), index: 0 };
- assert_eq!(funding_outpoint.to_channel_id(), chan_id);
+ assert_eq!(ChannelId::v1_from_funding_outpoint(funding_outpoint), chan_id);
let payment_preimage = route_payment(&nodes[0], &[&nodes[1]], 3_000_000).0;
let failed_payment_hash = route_payment(&nodes[1], &[&nodes[0]], 1_000_000).1;
let (_, _, chan_id, funding_tx) =
create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 100_000_000);
let funding_outpoint = OutPoint { txid: funding_tx.txid(), index: 0 };
- assert_eq!(funding_outpoint.to_channel_id(), chan_id);
+ assert_eq!(ChannelId::v1_from_funding_outpoint(funding_outpoint), chan_id);
// We create two HTLCs, one which we will give A the preimage to to generate an HTLC-Success
// transaction, and one which we will not, allowing B to claim the HTLC output in an aggregated
check_spends!(revoked_htlc_claim, htlc_tx);
}
- let mut revoked_claim_transaction_map = HashMap::new();
+ let mut revoked_claim_transaction_map = new_hash_map();
for current_tx in txn.into_iter() {
revoked_claim_transaction_map.insert(current_tx.txid(), current_tx);
}
// We should expect our round trip serialization check to fail as we're writing the monitor
// with the incorrect P2WPKH script but reading it with the correct P2WSH script.
*nodes[1].chain_monitor.expect_monitor_round_trip_fail.lock().unwrap() = Some(chan_id);
- let commitment_tx_conf_height = block_from_scid(&mine_transaction(&nodes[1], &commitment_tx));
+ let commitment_tx_conf_height = block_from_scid(mine_transaction(&nodes[1], &commitment_tx));
let serialized_monitor = get_monitor!(nodes[1], chan_id).encode();
reload_node!(nodes[1], user_config, &nodes[1].node.encode(), &[&serialized_monitor], persister, chain_monitor, node_deserialized);
commitment_tx_conf_height
} else {
let serialized_monitor = get_monitor!(nodes[1], chan_id).encode();
reload_node!(nodes[1], user_config, &nodes[1].node.encode(), &[&serialized_monitor], persister, chain_monitor, node_deserialized);
- let commitment_tx_conf_height = block_from_scid(&mine_transaction(&nodes[1], &commitment_tx));
+ let commitment_tx_conf_height = block_from_scid(mine_transaction(&nodes[1], &commitment_tx));
check_added_monitors(&nodes[1], 1);
check_closed_broadcast(&nodes[1], 1, true);
commitment_tx_conf_height
(&nodes[0], &nodes[1])
};
- closing_node.node.force_close_broadcasting_latest_txn(&chan_id, &other_node.node.get_our_node_id()).unwrap();
+ get_monitor!(closing_node, chan_id).broadcast_latest_holder_commitment_txn(
+ &closing_node.tx_broadcaster, &closing_node.fee_estimator, &closing_node.logger
+ );
// The commitment transaction comes first.
let commitment_tx = {
mine_transaction(closing_node, &commitment_tx);
check_added_monitors!(closing_node, 1);
check_closed_broadcast!(closing_node, true);
- check_closed_event!(closing_node, 1, ClosureReason::HolderForceClosed, [other_node.node.get_our_node_id()], 1_000_000);
+ check_closed_event!(closing_node, 1, ClosureReason::CommitmentTxConfirmed, [other_node.node.get_our_node_id()], 1_000_000);
mine_transaction(other_node, &commitment_tx);
check_added_monitors!(other_node, 1);
use crate::onion_message;
use crate::sign::{NodeSigner, Recipient};
+#[allow(unused_imports)]
use crate::prelude::*;
-#[cfg(feature = "std")]
-use core::convert::TryFrom;
+
use core::fmt;
use core::fmt::Debug;
use core::ops::Deref;
Io(io::ErrorKind),
/// The message included zlib-compressed values, which we don't support.
UnsupportedCompression,
+ /// Value is validly encoded but is dangerous to use.
+ ///
+ /// This is used for things like [`ChannelManager`] deserialization where we want to ensure
+ /// that we don't use a [`ChannelManager`] which is out of sync with the [`ChannelMonitor`].
+ /// This indicates that there is a critical implementation flaw in the storage implementation
+ /// and it's unsafe to continue.
+ ///
+ /// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
+ /// [`ChannelMonitor`]: crate::chain::channelmonitor::ChannelMonitor
+ DangerousValue,
}
/// An [`init`] message to be sent to or received from a peer.
pub byteslen: u16,
}
-/// An [`open_channel`] message to be sent to or received from a peer.
-///
-/// Used in V1 channel establishment
+/// Contains fields that are both common to [`open_channel`] and `open_channel2` messages.
///
/// [`open_channel`]: https://github.com/lightning/bolts/blob/master/02-peer-protocol.md#the-open_channel-message
-#[derive(Clone, Debug, Hash, PartialEq, Eq)]
-pub struct OpenChannel {
- /// The genesis hash of the blockchain where the channel is to be opened
- pub chain_hash: ChainHash,
- /// A temporary channel ID, until the funding outpoint is announced
- pub temporary_channel_id: ChannelId,
- /// The channel value
- pub funding_satoshis: u64,
- /// The amount to push to the counterparty as part of the open, in milli-satoshi
- pub push_msat: u64,
- /// The threshold below which outputs on transactions broadcast by sender will be omitted
- pub dust_limit_satoshis: u64,
- /// The maximum inbound HTLC value in flight towards sender, in milli-satoshi
- pub max_htlc_value_in_flight_msat: u64,
- /// The minimum value unencumbered by HTLCs for the counterparty to keep in the channel
- pub channel_reserve_satoshis: u64,
- /// The minimum HTLC size incoming to sender, in milli-satoshi
- pub htlc_minimum_msat: u64,
- /// The feerate per 1000-weight of sender generated transactions, until updated by
- /// [`UpdateFee`]
- pub feerate_per_kw: u32,
- /// The number of blocks which the counterparty will have to wait to claim on-chain funds if
- /// they broadcast a commitment transaction
- pub to_self_delay: u16,
- /// The maximum number of inbound HTLCs towards sender
- pub max_accepted_htlcs: u16,
- /// The sender's key controlling the funding transaction
- pub funding_pubkey: PublicKey,
- /// Used to derive a revocation key for transactions broadcast by counterparty
- pub revocation_basepoint: PublicKey,
- /// A payment key to sender for transactions broadcast by counterparty
- pub payment_point: PublicKey,
- /// Used to derive a payment key to sender for transactions broadcast by sender
- pub delayed_payment_basepoint: PublicKey,
- /// Used to derive an HTLC payment key to sender
- pub htlc_basepoint: PublicKey,
- /// The first to-be-broadcast-by-sender transaction's per commitment point
- pub first_per_commitment_point: PublicKey,
- /// The channel flags to be used
- pub channel_flags: u8,
- /// A request to pre-set the to-sender output's `scriptPubkey` for when we collaboratively close
- pub shutdown_scriptpubkey: Option<ScriptBuf>,
- /// The channel type that this channel will represent
- ///
- /// If this is `None`, we derive the channel type from the intersection of our
- /// feature bits with our counterparty's feature bits from the [`Init`] message.
- pub channel_type: Option<ChannelTypeFeatures>,
-}
-
-/// An open_channel2 message to be sent by or received from the channel initiator.
-///
-/// Used in V2 channel establishment
-///
// TODO(dual_funding): Add spec link for `open_channel2`.
#[derive(Clone, Debug, Hash, PartialEq, Eq)]
-pub struct OpenChannelV2 {
+pub struct CommonOpenChannelFields {
/// The genesis hash of the blockchain where the channel is to be opened
pub chain_hash: ChainHash,
- /// A temporary channel ID derived using a zeroed out value for the channel acceptor's revocation basepoint
+ /// A temporary channel ID
+ /// For V2 channels: derived using a zeroed out value for the channel acceptor's revocation basepoint
+ /// For V1 channels: a temporary channel ID, until the funding outpoint is announced
pub temporary_channel_id: ChannelId,
- /// The feerate for the funding transaction set by the channel initiator
- pub funding_feerate_sat_per_1000_weight: u32,
- /// The feerate for the commitment transaction set by the channel initiator
- pub commitment_feerate_sat_per_1000_weight: u32,
- /// Part of the channel value contributed by the channel initiator
+ /// For V1 channels: The channel value
+ /// For V2 channels: Part of the channel value contributed by the channel initiator
pub funding_satoshis: u64,
/// The threshold below which outputs on transactions broadcast by the channel initiator will be
/// omitted
pub max_htlc_value_in_flight_msat: u64,
/// The minimum HTLC size incoming to channel initiator, in milli-satoshi
pub htlc_minimum_msat: u64,
+ /// The feerate for the commitment transaction set by the channel initiator until updated by
+ /// [`UpdateFee`]
+ pub commitment_feerate_sat_per_1000_weight: u32,
/// The number of blocks which the counterparty will have to wait to claim on-chain funds if they
/// broadcast a commitment transaction
pub to_self_delay: u16,
/// The maximum number of inbound HTLCs towards channel initiator
pub max_accepted_htlcs: u16,
- /// The locktime for the funding transaction
- pub locktime: u32,
/// The channel initiator's key controlling the funding transaction
pub funding_pubkey: PublicKey,
/// Used to derive a revocation key for transactions broadcast by counterparty
pub htlc_basepoint: PublicKey,
/// The first to-be-broadcast-by-channel-initiator transaction's per commitment point
pub first_per_commitment_point: PublicKey,
- /// The second to-be-broadcast-by-channel-initiator transaction's per commitment point
- pub second_per_commitment_point: PublicKey,
- /// Channel flags
+ /// The channel flags to be used
pub channel_flags: u8,
/// Optionally, a request to pre-set the to-channel-initiator output's scriptPubkey for when we
/// collaboratively close
pub shutdown_scriptpubkey: Option<ScriptBuf>,
- /// The channel type that this channel will represent. If none is set, we derive the channel
- /// type from the intersection of our feature bits with our counterparty's feature bits from
- /// the Init message.
+ /// The channel type that this channel will represent
+ ///
+ /// If this is `None`, we derive the channel type from the intersection of our
+ /// feature bits with our counterparty's feature bits from the [`Init`] message.
pub channel_type: Option<ChannelTypeFeatures>,
- /// Optionally, a requirement that only confirmed inputs can be added
- pub require_confirmed_inputs: Option<()>,
}
-/// An [`accept_channel`] message to be sent to or received from a peer.
+/// An [`open_channel`] message to be sent to or received from a peer.
///
/// Used in V1 channel establishment
///
-/// [`accept_channel`]: https://github.com/lightning/bolts/blob/master/02-peer-protocol.md#the-accept_channel-message
+/// [`open_channel`]: https://github.com/lightning/bolts/blob/master/02-peer-protocol.md#the-open_channel-message
#[derive(Clone, Debug, Hash, PartialEq, Eq)]
-pub struct AcceptChannel {
- /// A temporary channel ID, until the funding outpoint is announced
- pub temporary_channel_id: ChannelId,
- /// The threshold below which outputs on transactions broadcast by sender will be omitted
- pub dust_limit_satoshis: u64,
- /// The maximum inbound HTLC value in flight towards sender, in milli-satoshi
- pub max_htlc_value_in_flight_msat: u64,
+pub struct OpenChannel {
+ /// Common fields of `open_channel(2)`-like messages
+ pub common_fields: CommonOpenChannelFields,
+ /// The amount to push to the counterparty as part of the open, in milli-satoshi
+ pub push_msat: u64,
/// The minimum value unencumbered by HTLCs for the counterparty to keep in the channel
pub channel_reserve_satoshis: u64,
- /// The minimum HTLC size incoming to sender, in milli-satoshi
- pub htlc_minimum_msat: u64,
- /// Minimum depth of the funding transaction before the channel is considered open
- pub minimum_depth: u32,
- /// The number of blocks which the counterparty will have to wait to claim on-chain funds if they broadcast a commitment transaction
- pub to_self_delay: u16,
- /// The maximum number of inbound HTLCs towards sender
- pub max_accepted_htlcs: u16,
- /// The sender's key controlling the funding transaction
- pub funding_pubkey: PublicKey,
- /// Used to derive a revocation key for transactions broadcast by counterparty
- pub revocation_basepoint: PublicKey,
- /// A payment key to sender for transactions broadcast by counterparty
- pub payment_point: PublicKey,
- /// Used to derive a payment key to sender for transactions broadcast by sender
- pub delayed_payment_basepoint: PublicKey,
- /// Used to derive an HTLC payment key to sender for transactions broadcast by counterparty
- pub htlc_basepoint: PublicKey,
- /// The first to-be-broadcast-by-sender transaction's per commitment point
- pub first_per_commitment_point: PublicKey,
- /// A request to pre-set the to-sender output's scriptPubkey for when we collaboratively close
- pub shutdown_scriptpubkey: Option<ScriptBuf>,
- /// The channel type that this channel will represent.
- ///
- /// If this is `None`, we derive the channel type from the intersection of
- /// our feature bits with our counterparty's feature bits from the [`Init`] message.
- /// This is required to match the equivalent field in [`OpenChannel::channel_type`].
- pub channel_type: Option<ChannelTypeFeatures>,
- #[cfg(taproot)]
- /// Next nonce the channel initiator should use to create a funding output signature against
- pub next_local_nonce: Option<musig2::types::PublicNonce>,
}
-/// An accept_channel2 message to be sent by or received from the channel accepter.
+/// An open_channel2 message to be sent by or received from the channel initiator.
///
/// Used in V2 channel establishment
///
+// TODO(dual_funding): Add spec link for `open_channel2`.
+#[derive(Clone, Debug, Hash, PartialEq, Eq)]
+pub struct OpenChannelV2 {
+ /// Common fields of `open_channel(2)`-like messages
+ pub common_fields: CommonOpenChannelFields,
+ /// The feerate for the funding transaction set by the channel initiator
+ pub funding_feerate_sat_per_1000_weight: u32,
+ /// The locktime for the funding transaction
+ pub locktime: u32,
+ /// The second to-be-broadcast-by-channel-initiator transaction's per commitment point
+ pub second_per_commitment_point: PublicKey,
+ /// Optionally, a requirement that only confirmed inputs can be added
+ pub require_confirmed_inputs: Option<()>,
+}
+
+/// Contains fields that are both common to [`accept_channel`] and `accept_channel2` messages.
+///
+/// [`accept_channel`]: https://github.com/lightning/bolts/blob/master/02-peer-protocol.md#the-accept_channel-message
// TODO(dual_funding): Add spec link for `accept_channel2`.
#[derive(Clone, Debug, Hash, PartialEq, Eq)]
-pub struct AcceptChannelV2 {
- /// The same `temporary_channel_id` received from the initiator's `open_channel2` message.
+pub struct CommonAcceptChannelFields {
+ /// The same `temporary_channel_id` received from the initiator's `open_channel2` or `open_channel` message.
pub temporary_channel_id: ChannelId,
- /// Part of the channel value contributed by the channel acceptor
- pub funding_satoshis: u64,
/// The threshold below which outputs on transactions broadcast by the channel acceptor will be
/// omitted
pub dust_limit_satoshis: u64,
- /// The maximum inbound HTLC value in flight towards channel acceptor, in milli-satoshi
+ /// The maximum inbound HTLC value in flight towards sender, in milli-satoshi
pub max_htlc_value_in_flight_msat: u64,
/// The minimum HTLC size incoming to channel acceptor, in milli-satoshi
pub htlc_minimum_msat: u64,
pub htlc_basepoint: PublicKey,
/// The first to-be-broadcast-by-channel-acceptor transaction's per commitment point
pub first_per_commitment_point: PublicKey,
- /// The second to-be-broadcast-by-channel-acceptor transaction's per commitment point
- pub second_per_commitment_point: PublicKey,
/// Optionally, a request to pre-set the to-channel-acceptor output's scriptPubkey for when we
/// collaboratively close
pub shutdown_scriptpubkey: Option<ScriptBuf>,
/// type from the intersection of our feature bits with our counterparty's feature bits from
/// the Init message.
///
- /// This is required to match the equivalent field in [`OpenChannelV2::channel_type`].
+ /// This is required to match the equivalent field in [`OpenChannel`] or [`OpenChannelV2`]'s
+ /// [`CommonOpenChannelFields::channel_type`].
pub channel_type: Option<ChannelTypeFeatures>,
+}
+
+/// An [`accept_channel`] message to be sent to or received from a peer.
+///
+/// Used in V1 channel establishment
+///
+/// [`accept_channel`]: https://github.com/lightning/bolts/blob/master/02-peer-protocol.md#the-accept_channel-message
+#[derive(Clone, Debug, Hash, PartialEq, Eq)]
+pub struct AcceptChannel {
+ /// Common fields of `accept_channel(2)`-like messages
+ pub common_fields: CommonAcceptChannelFields,
+ /// The minimum value unencumbered by HTLCs for the counterparty to keep in the channel
+ pub channel_reserve_satoshis: u64,
+ #[cfg(taproot)]
+ /// Next nonce the channel initiator should use to create a funding output signature against
+ pub next_local_nonce: Option<musig2::types::PublicNonce>,
+}
+
+/// An accept_channel2 message to be sent by or received from the channel acceptor.
+///
+/// Used in V2 channel establishment
+///
+// TODO(dual_funding): Add spec link for `accept_channel2`.
+#[derive(Clone, Debug, Hash, PartialEq, Eq)]
+pub struct AcceptChannelV2 {
+ /// Common fields of `accept_channel(2)`-like messages
+ pub common_fields: CommonAcceptChannelFields,
+ /// Part of the channel value contributed by the channel acceptor
+ pub funding_satoshis: u64,
+ /// The second to-be-broadcast-by-channel-acceptor transaction's per commitment point
+ pub second_per_commitment_point: PublicKey,
/// Optionally, a requirement that only confirmed inputs can be added
pub require_confirmed_inputs: Option<()>,
}
pub short_channel_id_alias: Option<u64>,
}
+/// A randomly chosen number that is used to identify inputs within an interactive transaction
+/// construction.
+pub type SerialId = u64;
+
/// An stfu (quiescence) message to be sent by or received from the stfu initiator.
// TODO(splicing): Add spec link for `stfu`; still in draft, using from https://github.com/lightning/bolts/pull/863
#[derive(Clone, Debug, PartialEq, Eq)]
pub channel_id: ChannelId,
/// A randomly chosen unique identifier for this input, which is even for initiators and odd for
/// non-initiators.
- pub serial_id: u64,
+ pub serial_id: SerialId,
/// Serialized transaction that contains the output this input spends to verify that it is non
/// malleable.
pub prevtx: TransactionU16LenLimited,
pub channel_id: ChannelId,
/// A randomly chosen unique identifier for this output, which is even for initiators and odd for
/// non-initiators.
- pub serial_id: u64,
+ pub serial_id: SerialId,
/// The satoshi value of the output
pub sats: u64,
/// The scriptPubKey for the output
/// The channel ID
pub channel_id: ChannelId,
/// The serial ID of the input to be removed
- pub serial_id: u64,
+ pub serial_id: SerialId,
}
/// A tx_remove_output message for removing an output during interactive transaction construction.
/// The channel ID
pub channel_id: ChannelId,
/// The serial ID of the output to be removed
- pub serial_id: u64,
+ pub serial_id: SerialId,
}
/// A tx_complete message signalling the conclusion of a peer's transaction contributions during
pub tx_hash: Txid,
/// The list of witnesses
pub witnesses: Vec<Witness>,
+ /// Optional signature for the shared input -- the previous funding outpoint -- signed by both peers
+ pub funding_outpoint_sig: Option<Signature>,
}
/// A tx_init_rbf message which initiates a replacement of the transaction after it's been
/// This maximum length is reached by a hostname address descriptor:
/// a hostname with a maximum length of 255, its 1-byte length and a 2-byte port.
pub(crate) const MAX_LEN: u16 = 258;
+
+ pub(crate) fn is_tor(&self) -> bool {
+ match self {
+ &SocketAddress::TcpIpV4 {..} => false,
+ &SocketAddress::TcpIpV6 {..} => false,
+ &SocketAddress::OnionV2(_) => true,
+ &SocketAddress::OnionV3 {..} => true,
+ &SocketAddress::Hostname {..} => false,
+ }
+ }
}
impl Writeable for SocketAddress {
pub alias: NodeAlias,
/// List of addresses on which this node is reachable
pub addresses: Vec<SocketAddress>,
- pub(crate) excess_address_data: Vec<u8>,
- pub(crate) excess_data: Vec<u8>,
+ /// Excess address data which was signed as a part of the message which we do not (yet) understand how
+ /// to decode.
+ ///
+ /// This is stored to ensure forward-compatibility as new address types are added to the lightning gossip protocol.
+ pub excess_address_data: Vec<u8>,
+ /// Excess data which was signed as a part of the message which we do not (yet) understand how
+ /// to decode.
+ ///
+ /// This is stored to ensure forward-compatibility as new fields are added to the lightning gossip protocol.
+ pub excess_data: Vec<u8>,
}
#[derive(Clone, Debug, Hash, PartialEq, Eq)]
/// A [`node_announcement`] message to be sent to or received from a peer.
// Splicing
/// Handle an incoming `splice` message from the given peer.
+ #[cfg(splicing)]
fn handle_splice(&self, their_node_id: &PublicKey, msg: &Splice);
/// Handle an incoming `splice_ack` message from the given peer.
+ #[cfg(splicing)]
fn handle_splice_ack(&self, their_node_id: &PublicKey, msg: &SpliceAck);
/// Handle an incoming `splice_locked` message from the given peer.
+ #[cfg(splicing)]
fn handle_splice_locked(&self, their_node_id: &PublicKey, msg: &SpliceLocked);
// Interactive channel construction
mod fuzzy_internal_msgs {
use bitcoin::secp256k1::PublicKey;
- use crate::blinded_path::payment::{PaymentConstraints, PaymentRelay};
- use crate::prelude::*;
+ use crate::blinded_path::payment::{PaymentConstraints, PaymentContext, PaymentRelay};
use crate::ln::{PaymentPreimage, PaymentSecret};
use crate::ln::features::BlindedHopFeatures;
- use super::FinalOnionHopData;
+ use super::{FinalOnionHopData, TrampolineOnionPacket};
+
+ #[allow(unused_imports)]
+ use crate::prelude::*;
// These types aren't intended to be pub, but are exposed for direct fuzzing (as we deserialize
// them from untrusted input):
cltv_expiry_height: u32,
payment_secret: PaymentSecret,
payment_constraints: PaymentConstraints,
+ payment_context: PaymentContext,
intro_node_blinding_point: Option<PublicKey>,
+ keysend_preimage: Option<PaymentPreimage>,
+ custom_tlvs: Vec<(u64, Vec<u8>)>,
}
}
amt_to_forward: u64,
outgoing_cltv_value: u32,
},
+ #[allow(unused)]
+ TrampolineEntrypoint {
+ amt_to_forward: u64,
+ outgoing_cltv_value: u32,
+ multipath_trampoline_data: Option<FinalOnionHopData>,
+ trampoline_packet: TrampolineOnionPacket,
+ },
Receive {
payment_data: Option<FinalOnionHopData>,
payment_metadata: Option<Vec<u8>>,
cltv_expiry_height: u32,
encrypted_tlvs: Vec<u8>,
intro_node_blinding_point: Option<PublicKey>, // Set if the introduction node of the blinded path is the final node
+ keysend_preimage: Option<PaymentPreimage>,
+ custom_tlvs: Vec<(u64, Vec<u8>)>,
+ }
+ }
+
+ pub(crate) enum OutboundTrampolinePayload {
+ #[allow(unused)]
+ Forward {
+ /// The value, in msat, of the payment after this hop's fee is deducted.
+ amt_to_forward: u64,
+ outgoing_cltv_value: u32,
+ /// The node id to which the trampoline node must find a route
+ outgoing_node_id: PublicKey,
}
}
}
}
+/// BOLT 4 onion packet including hop data for the next peer.
+#[derive(Clone, Hash, PartialEq, Eq)]
+pub struct TrampolineOnionPacket {
+	/// BOLT 4 version number
+ pub version: u8,
+	/// A random secp256k1 point, used to build the ECDH shared secret to decrypt hop_data
+ pub public_key: PublicKey,
+ /// Encrypted payload for the next hop
+ //
+ // Unlike the onion packets used for payments, Trampoline onion packets have to be shorter than
+ // 1300 bytes. The expected default is 650 bytes.
+ // TODO: if 650 ends up being the most common size, optimize this to be:
+ // enum { SixFifty([u8; 650]), VarLen(Vec<u8>) }
+ pub hop_data: Vec<u8>,
+ /// HMAC to verify the integrity of hop_data
+ pub hmac: [u8; 32],
+}
+
+impl onion_utils::Packet for TrampolineOnionPacket {
+ type Data = Vec<u8>;
+ fn new(public_key: PublicKey, hop_data: Vec<u8>, hmac: [u8; 32]) -> Self {
+ Self {
+ version: 0,
+ public_key,
+ hop_data,
+ hmac,
+ }
+ }
+}
+
+impl Writeable for TrampolineOnionPacket {
+ fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
+ self.version.write(w)?;
+ self.public_key.write(w)?;
+ w.write_all(&self.hop_data)?;
+ self.hmac.write(w)?;
+ Ok(())
+ }
+}
+
+impl Debug for TrampolineOnionPacket {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ f.write_fmt(format_args!("TrampolineOnionPacket version {} with hmac {:?}", self.version, &self.hmac[..]))
+ }
+}
+
#[derive(Clone, Debug, Hash, PartialEq, Eq)]
pub(crate) struct OnionErrorPacket {
// This really should be a constant size slice, but the spec lets these things be up to 128KB?
DecodeError::BadLengthDescriptor => f.write_str("A length descriptor in the packet didn't describe the later data correctly"),
DecodeError::Io(ref e) => fmt::Debug::fmt(e, f),
DecodeError::UnsupportedCompression => f.write_str("We don't support receiving messages with zlib-compressed fields"),
+ DecodeError::DangerousValue => f.write_str("Value would be dangerous to continue execution with"),
}
}
}
}
}
-#[cfg(not(taproot))]
-impl_writeable_msg!(AcceptChannel, {
- temporary_channel_id,
- dust_limit_satoshis,
- max_htlc_value_in_flight_msat,
- channel_reserve_satoshis,
- htlc_minimum_msat,
- minimum_depth,
- to_self_delay,
- max_accepted_htlcs,
- funding_pubkey,
- revocation_basepoint,
- payment_point,
- delayed_payment_basepoint,
- htlc_basepoint,
- first_per_commitment_point,
-}, {
- (0, shutdown_scriptpubkey, (option, encoding: (ScriptBuf, WithoutLength))), // Don't encode length twice.
- (1, channel_type, option),
-});
+impl Writeable for AcceptChannel {
+ fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
+ self.common_fields.temporary_channel_id.write(w)?;
+ self.common_fields.dust_limit_satoshis.write(w)?;
+ self.common_fields.max_htlc_value_in_flight_msat.write(w)?;
+ self.channel_reserve_satoshis.write(w)?;
+ self.common_fields.htlc_minimum_msat.write(w)?;
+ self.common_fields.minimum_depth.write(w)?;
+ self.common_fields.to_self_delay.write(w)?;
+ self.common_fields.max_accepted_htlcs.write(w)?;
+ self.common_fields.funding_pubkey.write(w)?;
+ self.common_fields.revocation_basepoint.write(w)?;
+ self.common_fields.payment_basepoint.write(w)?;
+ self.common_fields.delayed_payment_basepoint.write(w)?;
+ self.common_fields.htlc_basepoint.write(w)?;
+ self.common_fields.first_per_commitment_point.write(w)?;
+ #[cfg(not(taproot))]
+ encode_tlv_stream!(w, {
+ (0, self.common_fields.shutdown_scriptpubkey.as_ref().map(|s| WithoutLength(s)), option), // Don't encode length twice.
+ (1, self.common_fields.channel_type, option),
+ });
+ #[cfg(taproot)]
+ encode_tlv_stream!(w, {
+ (0, self.common_fields.shutdown_scriptpubkey.as_ref().map(|s| WithoutLength(s)), option), // Don't encode length twice.
+ (1, self.common_fields.channel_type, option),
+ (4, self.next_local_nonce, option),
+ });
+ Ok(())
+ }
+}
-#[cfg(taproot)]
-impl_writeable_msg!(AcceptChannel, {
- temporary_channel_id,
- dust_limit_satoshis,
- max_htlc_value_in_flight_msat,
- channel_reserve_satoshis,
- htlc_minimum_msat,
- minimum_depth,
- to_self_delay,
- max_accepted_htlcs,
- funding_pubkey,
- revocation_basepoint,
- payment_point,
- delayed_payment_basepoint,
- htlc_basepoint,
- first_per_commitment_point,
-}, {
- (0, shutdown_scriptpubkey, (option, encoding: (ScriptBuf, WithoutLength))), // Don't encode length twice.
- (1, channel_type, option),
- (4, next_local_nonce, option),
-});
+impl Readable for AcceptChannel {
+ fn read<R: Read>(r: &mut R) -> Result<Self, DecodeError> {
+ let temporary_channel_id: ChannelId = Readable::read(r)?;
+ let dust_limit_satoshis: u64 = Readable::read(r)?;
+ let max_htlc_value_in_flight_msat: u64 = Readable::read(r)?;
+ let channel_reserve_satoshis: u64 = Readable::read(r)?;
+ let htlc_minimum_msat: u64 = Readable::read(r)?;
+ let minimum_depth: u32 = Readable::read(r)?;
+ let to_self_delay: u16 = Readable::read(r)?;
+ let max_accepted_htlcs: u16 = Readable::read(r)?;
+ let funding_pubkey: PublicKey = Readable::read(r)?;
+ let revocation_basepoint: PublicKey = Readable::read(r)?;
+ let payment_basepoint: PublicKey = Readable::read(r)?;
+ let delayed_payment_basepoint: PublicKey = Readable::read(r)?;
+ let htlc_basepoint: PublicKey = Readable::read(r)?;
+ let first_per_commitment_point: PublicKey = Readable::read(r)?;
+
+ let mut shutdown_scriptpubkey: Option<ScriptBuf> = None;
+ let mut channel_type: Option<ChannelTypeFeatures> = None;
+ #[cfg(not(taproot))]
+ decode_tlv_stream!(r, {
+ (0, shutdown_scriptpubkey, (option, encoding: (ScriptBuf, WithoutLength))),
+ (1, channel_type, option),
+ });
+ #[cfg(taproot)]
+ let mut next_local_nonce: Option<musig2::types::PublicNonce> = None;
+ #[cfg(taproot)]
+ decode_tlv_stream!(r, {
+ (0, shutdown_scriptpubkey, (option, encoding: (ScriptBuf, WithoutLength))),
+ (1, channel_type, option),
+ (4, next_local_nonce, option),
+ });
-impl_writeable_msg!(AcceptChannelV2, {
- temporary_channel_id,
- funding_satoshis,
- dust_limit_satoshis,
- max_htlc_value_in_flight_msat,
- htlc_minimum_msat,
- minimum_depth,
- to_self_delay,
- max_accepted_htlcs,
- funding_pubkey,
- revocation_basepoint,
- payment_basepoint,
- delayed_payment_basepoint,
- htlc_basepoint,
- first_per_commitment_point,
- second_per_commitment_point,
-}, {
- (0, shutdown_scriptpubkey, option),
- (1, channel_type, option),
- (2, require_confirmed_inputs, option),
-});
+ Ok(AcceptChannel {
+ common_fields: CommonAcceptChannelFields {
+ temporary_channel_id,
+ dust_limit_satoshis,
+ max_htlc_value_in_flight_msat,
+ htlc_minimum_msat,
+ minimum_depth,
+ to_self_delay,
+ max_accepted_htlcs,
+ funding_pubkey,
+ revocation_basepoint,
+ payment_basepoint,
+ delayed_payment_basepoint,
+ htlc_basepoint,
+ first_per_commitment_point,
+ shutdown_scriptpubkey,
+ channel_type,
+ },
+ channel_reserve_satoshis,
+ #[cfg(taproot)]
+ next_local_nonce,
+ })
+ }
+}
+
+impl Writeable for AcceptChannelV2 {
+ fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
+ self.common_fields.temporary_channel_id.write(w)?;
+ self.funding_satoshis.write(w)?;
+ self.common_fields.dust_limit_satoshis.write(w)?;
+ self.common_fields.max_htlc_value_in_flight_msat.write(w)?;
+ self.common_fields.htlc_minimum_msat.write(w)?;
+ self.common_fields.minimum_depth.write(w)?;
+ self.common_fields.to_self_delay.write(w)?;
+ self.common_fields.max_accepted_htlcs.write(w)?;
+ self.common_fields.funding_pubkey.write(w)?;
+ self.common_fields.revocation_basepoint.write(w)?;
+ self.common_fields.payment_basepoint.write(w)?;
+ self.common_fields.delayed_payment_basepoint.write(w)?;
+ self.common_fields.htlc_basepoint.write(w)?;
+ self.common_fields.first_per_commitment_point.write(w)?;
+ self.second_per_commitment_point.write(w)?;
+
+ encode_tlv_stream!(w, {
+ (0, self.common_fields.shutdown_scriptpubkey.as_ref().map(|s| WithoutLength(s)), option), // Don't encode length twice.
+ (1, self.common_fields.channel_type, option),
+ (2, self.require_confirmed_inputs, option),
+ });
+ Ok(())
+ }
+}
+
+impl Readable for AcceptChannelV2 {
+ fn read<R: Read>(r: &mut R) -> Result<Self, DecodeError> {
+ let temporary_channel_id: ChannelId = Readable::read(r)?;
+ let funding_satoshis: u64 = Readable::read(r)?;
+ let dust_limit_satoshis: u64 = Readable::read(r)?;
+ let max_htlc_value_in_flight_msat: u64 = Readable::read(r)?;
+ let htlc_minimum_msat: u64 = Readable::read(r)?;
+ let minimum_depth: u32 = Readable::read(r)?;
+ let to_self_delay: u16 = Readable::read(r)?;
+ let max_accepted_htlcs: u16 = Readable::read(r)?;
+ let funding_pubkey: PublicKey = Readable::read(r)?;
+ let revocation_basepoint: PublicKey = Readable::read(r)?;
+ let payment_basepoint: PublicKey = Readable::read(r)?;
+ let delayed_payment_basepoint: PublicKey = Readable::read(r)?;
+ let htlc_basepoint: PublicKey = Readable::read(r)?;
+ let first_per_commitment_point: PublicKey = Readable::read(r)?;
+ let second_per_commitment_point: PublicKey = Readable::read(r)?;
+
+ let mut shutdown_scriptpubkey: Option<ScriptBuf> = None;
+ let mut channel_type: Option<ChannelTypeFeatures> = None;
+ let mut require_confirmed_inputs: Option<()> = None;
+ decode_tlv_stream!(r, {
+ (0, shutdown_scriptpubkey, (option, encoding: (ScriptBuf, WithoutLength))),
+ (1, channel_type, option),
+ (2, require_confirmed_inputs, option),
+ });
+
+ Ok(AcceptChannelV2 {
+ common_fields: CommonAcceptChannelFields {
+ temporary_channel_id,
+ dust_limit_satoshis,
+ max_htlc_value_in_flight_msat,
+ htlc_minimum_msat,
+ minimum_depth,
+ to_self_delay,
+ max_accepted_htlcs,
+ funding_pubkey,
+ revocation_basepoint,
+ payment_basepoint,
+ delayed_payment_basepoint,
+ htlc_basepoint,
+ first_per_commitment_point,
+ shutdown_scriptpubkey,
+ channel_type,
+ },
+ funding_satoshis,
+ second_per_commitment_point,
+ require_confirmed_inputs,
+ })
+ }
+}
impl_writeable_msg!(Stfu, {
channel_id,
channel_id,
tx_hash,
witnesses,
-}, {});
+}, {
+ (0, funding_outpoint_sig, option),
+});
impl_writeable_msg!(TxInitRbf, {
channel_id,
}
}
-impl_writeable_msg!(OpenChannel, {
- chain_hash,
- temporary_channel_id,
- funding_satoshis,
- push_msat,
- dust_limit_satoshis,
- max_htlc_value_in_flight_msat,
- channel_reserve_satoshis,
- htlc_minimum_msat,
- feerate_per_kw,
- to_self_delay,
- max_accepted_htlcs,
- funding_pubkey,
- revocation_basepoint,
- payment_point,
- delayed_payment_basepoint,
- htlc_basepoint,
- first_per_commitment_point,
- channel_flags,
-}, {
- (0, shutdown_scriptpubkey, (option, encoding: (ScriptBuf, WithoutLength))), // Don't encode length twice.
- (1, channel_type, option),
-});
+impl Writeable for OpenChannel {
+ fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
+ self.common_fields.chain_hash.write(w)?;
+ self.common_fields.temporary_channel_id.write(w)?;
+ self.common_fields.funding_satoshis.write(w)?;
+ self.push_msat.write(w)?;
+ self.common_fields.dust_limit_satoshis.write(w)?;
+ self.common_fields.max_htlc_value_in_flight_msat.write(w)?;
+ self.channel_reserve_satoshis.write(w)?;
+ self.common_fields.htlc_minimum_msat.write(w)?;
+ self.common_fields.commitment_feerate_sat_per_1000_weight.write(w)?;
+ self.common_fields.to_self_delay.write(w)?;
+ self.common_fields.max_accepted_htlcs.write(w)?;
+ self.common_fields.funding_pubkey.write(w)?;
+ self.common_fields.revocation_basepoint.write(w)?;
+ self.common_fields.payment_basepoint.write(w)?;
+ self.common_fields.delayed_payment_basepoint.write(w)?;
+ self.common_fields.htlc_basepoint.write(w)?;
+ self.common_fields.first_per_commitment_point.write(w)?;
+ self.common_fields.channel_flags.write(w)?;
+ encode_tlv_stream!(w, {
+ (0, self.common_fields.shutdown_scriptpubkey.as_ref().map(|s| WithoutLength(s)), option), // Don't encode length twice.
+ (1, self.common_fields.channel_type, option),
+ });
+ Ok(())
+ }
+}
-impl_writeable_msg!(OpenChannelV2, {
- chain_hash,
- temporary_channel_id,
- funding_feerate_sat_per_1000_weight,
- commitment_feerate_sat_per_1000_weight,
- funding_satoshis,
- dust_limit_satoshis,
- max_htlc_value_in_flight_msat,
- htlc_minimum_msat,
- to_self_delay,
- max_accepted_htlcs,
- locktime,
- funding_pubkey,
- revocation_basepoint,
- payment_basepoint,
- delayed_payment_basepoint,
- htlc_basepoint,
- first_per_commitment_point,
- second_per_commitment_point,
- channel_flags,
-}, {
- (0, shutdown_scriptpubkey, option),
- (1, channel_type, option),
- (2, require_confirmed_inputs, option),
-});
+impl Readable for OpenChannel {
+ fn read<R: Read>(r: &mut R) -> Result<Self, DecodeError> {
+ let chain_hash: ChainHash = Readable::read(r)?;
+ let temporary_channel_id: ChannelId = Readable::read(r)?;
+ let funding_satoshis: u64 = Readable::read(r)?;
+ let push_msat: u64 = Readable::read(r)?;
+ let dust_limit_satoshis: u64 = Readable::read(r)?;
+ let max_htlc_value_in_flight_msat: u64 = Readable::read(r)?;
+ let channel_reserve_satoshis: u64 = Readable::read(r)?;
+ let htlc_minimum_msat: u64 = Readable::read(r)?;
+ let commitment_feerate_sat_per_1000_weight: u32 = Readable::read(r)?;
+ let to_self_delay: u16 = Readable::read(r)?;
+ let max_accepted_htlcs: u16 = Readable::read(r)?;
+ let funding_pubkey: PublicKey = Readable::read(r)?;
+ let revocation_basepoint: PublicKey = Readable::read(r)?;
+ let payment_basepoint: PublicKey = Readable::read(r)?;
+ let delayed_payment_basepoint: PublicKey = Readable::read(r)?;
+ let htlc_basepoint: PublicKey = Readable::read(r)?;
+ let first_per_commitment_point: PublicKey = Readable::read(r)?;
+ let channel_flags: u8 = Readable::read(r)?;
+
+ let mut shutdown_scriptpubkey: Option<ScriptBuf> = None;
+ let mut channel_type: Option<ChannelTypeFeatures> = None;
+ decode_tlv_stream!(r, {
+ (0, shutdown_scriptpubkey, (option, encoding: (ScriptBuf, WithoutLength))),
+ (1, channel_type, option),
+ });
+ Ok(OpenChannel {
+ common_fields: CommonOpenChannelFields {
+ chain_hash,
+ temporary_channel_id,
+ funding_satoshis,
+ dust_limit_satoshis,
+ max_htlc_value_in_flight_msat,
+ htlc_minimum_msat,
+ commitment_feerate_sat_per_1000_weight,
+ to_self_delay,
+ max_accepted_htlcs,
+ funding_pubkey,
+ revocation_basepoint,
+ payment_basepoint,
+ delayed_payment_basepoint,
+ htlc_basepoint,
+ first_per_commitment_point,
+ channel_flags,
+ shutdown_scriptpubkey,
+ channel_type,
+ },
+ push_msat,
+ channel_reserve_satoshis,
+ })
+ }
+}
+
+impl Writeable for OpenChannelV2 {
+ fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
+ self.common_fields.chain_hash.write(w)?;
+ self.common_fields.temporary_channel_id.write(w)?;
+ self.funding_feerate_sat_per_1000_weight.write(w)?;
+ self.common_fields.commitment_feerate_sat_per_1000_weight.write(w)?;
+ self.common_fields.funding_satoshis.write(w)?;
+ self.common_fields.dust_limit_satoshis.write(w)?;
+ self.common_fields.max_htlc_value_in_flight_msat.write(w)?;
+ self.common_fields.htlc_minimum_msat.write(w)?;
+ self.common_fields.to_self_delay.write(w)?;
+ self.common_fields.max_accepted_htlcs.write(w)?;
+ self.locktime.write(w)?;
+ self.common_fields.funding_pubkey.write(w)?;
+ self.common_fields.revocation_basepoint.write(w)?;
+ self.common_fields.payment_basepoint.write(w)?;
+ self.common_fields.delayed_payment_basepoint.write(w)?;
+ self.common_fields.htlc_basepoint.write(w)?;
+ self.common_fields.first_per_commitment_point.write(w)?;
+ self.second_per_commitment_point.write(w)?;
+ self.common_fields.channel_flags.write(w)?;
+ encode_tlv_stream!(w, {
+ (0, self.common_fields.shutdown_scriptpubkey.as_ref().map(|s| WithoutLength(s)), option), // Don't encode length twice.
+ (1, self.common_fields.channel_type, option),
+ (2, self.require_confirmed_inputs, option),
+ });
+ Ok(())
+ }
+}
+
+impl Readable for OpenChannelV2 {
+ fn read<R: Read>(r: &mut R) -> Result<Self, DecodeError> {
+ let chain_hash: ChainHash = Readable::read(r)?;
+ let temporary_channel_id: ChannelId = Readable::read(r)?;
+ let funding_feerate_sat_per_1000_weight: u32 = Readable::read(r)?;
+ let commitment_feerate_sat_per_1000_weight: u32 = Readable::read(r)?;
+ let funding_satoshis: u64 = Readable::read(r)?;
+ let dust_limit_satoshis: u64 = Readable::read(r)?;
+ let max_htlc_value_in_flight_msat: u64 = Readable::read(r)?;
+ let htlc_minimum_msat: u64 = Readable::read(r)?;
+ let to_self_delay: u16 = Readable::read(r)?;
+ let max_accepted_htlcs: u16 = Readable::read(r)?;
+ let locktime: u32 = Readable::read(r)?;
+ let funding_pubkey: PublicKey = Readable::read(r)?;
+ let revocation_basepoint: PublicKey = Readable::read(r)?;
+ let payment_basepoint: PublicKey = Readable::read(r)?;
+ let delayed_payment_basepoint: PublicKey = Readable::read(r)?;
+ let htlc_basepoint: PublicKey = Readable::read(r)?;
+ let first_per_commitment_point: PublicKey = Readable::read(r)?;
+ let second_per_commitment_point: PublicKey = Readable::read(r)?;
+ let channel_flags: u8 = Readable::read(r)?;
+
+ let mut shutdown_scriptpubkey: Option<ScriptBuf> = None;
+ let mut channel_type: Option<ChannelTypeFeatures> = None;
+ let mut require_confirmed_inputs: Option<()> = None;
+ decode_tlv_stream!(r, {
+ (0, shutdown_scriptpubkey, (option, encoding: (ScriptBuf, WithoutLength))),
+ (1, channel_type, option),
+ (2, require_confirmed_inputs, option),
+ });
+ Ok(OpenChannelV2 {
+ common_fields: CommonOpenChannelFields {
+ chain_hash,
+ temporary_channel_id,
+ funding_satoshis,
+ dust_limit_satoshis,
+ max_htlc_value_in_flight_msat,
+ htlc_minimum_msat,
+ commitment_feerate_sat_per_1000_weight,
+ to_self_delay,
+ max_accepted_htlcs,
+ funding_pubkey,
+ revocation_basepoint,
+ payment_basepoint,
+ delayed_payment_basepoint,
+ htlc_basepoint,
+ first_per_commitment_point,
+ channel_flags,
+ shutdown_scriptpubkey,
+ channel_type,
+ },
+ funding_feerate_sat_per_1000_weight,
+ locktime,
+ second_per_commitment_point,
+ require_confirmed_inputs,
+ })
+ }
+}
#[cfg(not(taproot))]
impl_writeable_msg!(RevokeAndACK, {
(6, short_channel_id, required)
});
},
+ Self::TrampolineEntrypoint {
+ amt_to_forward, outgoing_cltv_value, ref multipath_trampoline_data,
+ ref trampoline_packet
+ } => {
+ _encode_varint_length_prefixed_tlv!(w, {
+ (2, HighZeroBytesDroppedBigSize(*amt_to_forward), required),
+ (4, HighZeroBytesDroppedBigSize(*outgoing_cltv_value), required),
+ (8, multipath_trampoline_data, option),
+ (20, trampoline_packet, required)
+ });
+ },
Self::Receive {
ref payment_data, ref payment_metadata, ref keysend_preimage, sender_intended_htlc_amt_msat,
cltv_expiry_height, ref custom_tlvs,
},
Self::BlindedReceive {
sender_intended_htlc_amt_msat, total_msat, cltv_expiry_height, encrypted_tlvs,
- intro_node_blinding_point,
+ intro_node_blinding_point, keysend_preimage, ref custom_tlvs,
} => {
+ // We need to update [`ln::outbound_payment::RecipientOnionFields::with_custom_tlvs`]
+ // to reject any reserved types in the experimental range if new ones are ever
+ // standardized.
+ let keysend_tlv = keysend_preimage.map(|preimage| (5482373484, preimage.encode()));
+ let mut custom_tlvs: Vec<&(u64, Vec<u8>)> = custom_tlvs.iter().chain(keysend_tlv.iter()).collect();
+ custom_tlvs.sort_unstable_by_key(|(typ, _)| *typ);
_encode_varint_length_prefixed_tlv!(w, {
(2, HighZeroBytesDroppedBigSize(*sender_intended_htlc_amt_msat), required),
(4, HighZeroBytesDroppedBigSize(*cltv_expiry_height), required),
(10, *encrypted_tlvs, required_vec),
(12, intro_node_blinding_point, option),
(18, HighZeroBytesDroppedBigSize(*total_msat), required)
- });
+ }, custom_tlvs.iter());
},
}
Ok(())
}
}
+impl Writeable for OutboundTrampolinePayload {
+ fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
+ match self {
+ Self::Forward { amt_to_forward, outgoing_cltv_value, outgoing_node_id } => {
+ _encode_varint_length_prefixed_tlv!(w, {
+ (2, HighZeroBytesDroppedBigSize(*amt_to_forward), required),
+ (4, HighZeroBytesDroppedBigSize(*outgoing_cltv_value), required),
+ (14, outgoing_node_id, required)
+ });
+ }
+ }
+ Ok(())
+ }
+}
+
+
impl<NS: Deref> ReadableArgs<(Option<PublicKey>, &NS)> for InboundOnionPayload where NS::Target: NodeSigner {
fn read<R: Read>(r: &mut R, args: (Option<PublicKey>, &NS)) -> Result<Self, DecodeError> {
let (update_add_blinding_point, node_signer) = args;
}
if let Some(blinding_point) = intro_node_blinding_point.or(update_add_blinding_point) {
- if short_id.is_some() || payment_data.is_some() || payment_metadata.is_some() ||
- keysend_preimage.is_some()
- {
+ if short_id.is_some() || payment_data.is_some() || payment_metadata.is_some() {
return Err(DecodeError::InvalidValue)
}
let enc_tlvs = encrypted_tlvs_opt.ok_or(DecodeError::InvalidValue)?.0;
ChaChaPolyReadAdapter { readable: BlindedPaymentTlvs::Forward(ForwardTlvs {
short_channel_id, payment_relay, payment_constraints, features
})} => {
- if amt.is_some() || cltv_value.is_some() || total_msat.is_some() {
+ if amt.is_some() || cltv_value.is_some() || total_msat.is_some() ||
+ keysend_preimage.is_some()
+ {
return Err(DecodeError::InvalidValue)
}
Ok(Self::BlindedForward {
})
},
ChaChaPolyReadAdapter { readable: BlindedPaymentTlvs::Receive(ReceiveTlvs {
- payment_secret, payment_constraints
+ payment_secret, payment_constraints, payment_context
})} => {
if total_msat.unwrap_or(0) > MAX_VALUE_MSAT { return Err(DecodeError::InvalidValue) }
Ok(Self::BlindedReceive {
cltv_expiry_height: cltv_value.ok_or(DecodeError::InvalidValue)?,
payment_secret,
payment_constraints,
+ payment_context,
intro_node_blinding_point,
+ keysend_preimage,
+ custom_tlvs,
})
},
}
#[cfg(test)]
mod tests {
- use std::convert::TryFrom;
use bitcoin::{Transaction, TxIn, ScriptBuf, Sequence, Witness, TxOut};
use hex::DisplayHex;
use crate::ln::{PaymentPreimage, PaymentHash, PaymentSecret};
use crate::ln::ChannelId;
use crate::ln::features::{ChannelFeatures, ChannelTypeFeatures, InitFeatures, NodeFeatures};
- use crate::ln::msgs::{self, FinalOnionHopData, OnionErrorPacket};
+ use crate::ln::msgs::{self, FinalOnionHopData, OnionErrorPacket, CommonOpenChannelFields, CommonAcceptChannelFields, TrampolineOnionPacket};
use crate::ln::msgs::SocketAddress;
use crate::routing::gossip::{NodeAlias, NodeId};
- use crate::util::ser::{Writeable, Readable, ReadableArgs, Hostname, TransactionU16LenLimited};
+ use crate::util::ser::{BigSize, Hostname, Readable, ReadableArgs, TransactionU16LenLimited, Writeable};
use crate::util::test_utils;
use bitcoin::hashes::hex::FromHex;
let (_, pubkey_5) = get_keys_from!("0505050505050505050505050505050505050505050505050505050505050505", secp_ctx);
let (_, pubkey_6) = get_keys_from!("0606060606060606060606060606060606060606060606060606060606060606", secp_ctx);
let open_channel = msgs::OpenChannel {
- chain_hash: ChainHash::using_genesis_block(Network::Bitcoin),
- temporary_channel_id: ChannelId::from_bytes([2; 32]),
- funding_satoshis: 1311768467284833366,
+ common_fields: CommonOpenChannelFields {
+ chain_hash: ChainHash::using_genesis_block(Network::Bitcoin),
+ temporary_channel_id: ChannelId::from_bytes([2; 32]),
+ funding_satoshis: 1311768467284833366,
+ dust_limit_satoshis: 3608586615801332854,
+ max_htlc_value_in_flight_msat: 8517154655701053848,
+ htlc_minimum_msat: 2316138423780173,
+ commitment_feerate_sat_per_1000_weight: 821716,
+ to_self_delay: 49340,
+ max_accepted_htlcs: 49340,
+ funding_pubkey: pubkey_1,
+ revocation_basepoint: pubkey_2,
+ payment_basepoint: pubkey_3,
+ delayed_payment_basepoint: pubkey_4,
+ htlc_basepoint: pubkey_5,
+ first_per_commitment_point: pubkey_6,
+ channel_flags: if random_bit { 1 << 5 } else { 0 },
+ shutdown_scriptpubkey: if shutdown { Some(Address::p2pkh(&::bitcoin::PublicKey{compressed: true, inner: pubkey_1}, Network::Testnet).script_pubkey()) } else { None },
+ channel_type: if incl_chan_type { Some(ChannelTypeFeatures::empty()) } else { None },
+ },
push_msat: 2536655962884945560,
- dust_limit_satoshis: 3608586615801332854,
- max_htlc_value_in_flight_msat: 8517154655701053848,
channel_reserve_satoshis: 8665828695742877976,
- htlc_minimum_msat: 2316138423780173,
- feerate_per_kw: 821716,
- to_self_delay: 49340,
- max_accepted_htlcs: 49340,
- funding_pubkey: pubkey_1,
- revocation_basepoint: pubkey_2,
- payment_point: pubkey_3,
- delayed_payment_basepoint: pubkey_4,
- htlc_basepoint: pubkey_5,
- first_per_commitment_point: pubkey_6,
- channel_flags: if random_bit { 1 << 5 } else { 0 },
- shutdown_scriptpubkey: if shutdown { Some(Address::p2pkh(&::bitcoin::PublicKey{compressed: true, inner: pubkey_1}, Network::Testnet).script_pubkey()) } else { None },
- channel_type: if incl_chan_type { Some(ChannelTypeFeatures::empty()) } else { None },
};
let encoded_value = open_channel.encode();
let mut target_value = Vec::new();
let (_, pubkey_6) = get_keys_from!("0606060606060606060606060606060606060606060606060606060606060606", secp_ctx);
let (_, pubkey_7) = get_keys_from!("0707070707070707070707070707070707070707070707070707070707070707", secp_ctx);
let open_channelv2 = msgs::OpenChannelV2 {
- chain_hash: ChainHash::using_genesis_block(Network::Bitcoin),
- temporary_channel_id: ChannelId::from_bytes([2; 32]),
+ common_fields: CommonOpenChannelFields {
+ chain_hash: ChainHash::using_genesis_block(Network::Bitcoin),
+ temporary_channel_id: ChannelId::from_bytes([2; 32]),
+ commitment_feerate_sat_per_1000_weight: 821716,
+ funding_satoshis: 1311768467284833366,
+ dust_limit_satoshis: 3608586615801332854,
+ max_htlc_value_in_flight_msat: 8517154655701053848,
+ htlc_minimum_msat: 2316138423780173,
+ to_self_delay: 49340,
+ max_accepted_htlcs: 49340,
+ funding_pubkey: pubkey_1,
+ revocation_basepoint: pubkey_2,
+ payment_basepoint: pubkey_3,
+ delayed_payment_basepoint: pubkey_4,
+ htlc_basepoint: pubkey_5,
+ first_per_commitment_point: pubkey_6,
+ channel_flags: if random_bit { 1 << 5 } else { 0 },
+ shutdown_scriptpubkey: if shutdown { Some(Address::p2pkh(&::bitcoin::PublicKey{compressed: true, inner: pubkey_1}, Network::Testnet).script_pubkey()) } else { None },
+ channel_type: if incl_chan_type { Some(ChannelTypeFeatures::empty()) } else { None },
+ },
funding_feerate_sat_per_1000_weight: 821716,
- commitment_feerate_sat_per_1000_weight: 821716,
- funding_satoshis: 1311768467284833366,
- dust_limit_satoshis: 3608586615801332854,
- max_htlc_value_in_flight_msat: 8517154655701053848,
- htlc_minimum_msat: 2316138423780173,
- to_self_delay: 49340,
- max_accepted_htlcs: 49340,
locktime: 305419896,
- funding_pubkey: pubkey_1,
- revocation_basepoint: pubkey_2,
- payment_basepoint: pubkey_3,
- delayed_payment_basepoint: pubkey_4,
- htlc_basepoint: pubkey_5,
- first_per_commitment_point: pubkey_6,
second_per_commitment_point: pubkey_7,
- channel_flags: if random_bit { 1 << 5 } else { 0 },
- shutdown_scriptpubkey: if shutdown { Some(Address::p2pkh(&::bitcoin::PublicKey{compressed: true, inner: pubkey_1}, Network::Testnet).script_pubkey()) } else { None },
- channel_type: if incl_chan_type { Some(ChannelTypeFeatures::empty()) } else { None },
require_confirmed_inputs: if require_confirmed_inputs { Some(()) } else { None },
};
let encoded_value = open_channelv2.encode();
target_value.append(&mut <Vec<u8>>::from_hex("00").unwrap());
}
if shutdown {
- target_value.append(&mut <Vec<u8>>::from_hex("001b").unwrap()); // Type 0 + Length 27
target_value.append(&mut <Vec<u8>>::from_hex("001976a91479b000887626b294a914501a4cd226b58b23598388ac").unwrap());
}
if incl_chan_type {
let (_, pubkey_5) = get_keys_from!("0505050505050505050505050505050505050505050505050505050505050505", secp_ctx);
let (_, pubkey_6) = get_keys_from!("0606060606060606060606060606060606060606060606060606060606060606", secp_ctx);
let accept_channel = msgs::AcceptChannel {
- temporary_channel_id: ChannelId::from_bytes([2; 32]),
- dust_limit_satoshis: 1311768467284833366,
- max_htlc_value_in_flight_msat: 2536655962884945560,
+ common_fields: CommonAcceptChannelFields {
+ temporary_channel_id: ChannelId::from_bytes([2; 32]),
+ dust_limit_satoshis: 1311768467284833366,
+ max_htlc_value_in_flight_msat: 2536655962884945560,
+ htlc_minimum_msat: 2316138423780173,
+ minimum_depth: 821716,
+ to_self_delay: 49340,
+ max_accepted_htlcs: 49340,
+ funding_pubkey: pubkey_1,
+ revocation_basepoint: pubkey_2,
+ payment_basepoint: pubkey_3,
+ delayed_payment_basepoint: pubkey_4,
+ htlc_basepoint: pubkey_5,
+ first_per_commitment_point: pubkey_6,
+ shutdown_scriptpubkey: if shutdown { Some(Address::p2pkh(&::bitcoin::PublicKey{compressed: true, inner: pubkey_1}, Network::Testnet).script_pubkey()) } else { None },
+ channel_type: None,
+ },
channel_reserve_satoshis: 3608586615801332854,
- htlc_minimum_msat: 2316138423780173,
- minimum_depth: 821716,
- to_self_delay: 49340,
- max_accepted_htlcs: 49340,
- funding_pubkey: pubkey_1,
- revocation_basepoint: pubkey_2,
- payment_point: pubkey_3,
- delayed_payment_basepoint: pubkey_4,
- htlc_basepoint: pubkey_5,
- first_per_commitment_point: pubkey_6,
- shutdown_scriptpubkey: if shutdown { Some(Address::p2pkh(&::bitcoin::PublicKey{compressed: true, inner: pubkey_1}, Network::Testnet).script_pubkey()) } else { None },
- channel_type: None,
#[cfg(taproot)]
next_local_nonce: None,
};
let (_, pubkey_6) = get_keys_from!("0606060606060606060606060606060606060606060606060606060606060606", secp_ctx);
let (_, pubkey_7) = get_keys_from!("0707070707070707070707070707070707070707070707070707070707070707", secp_ctx);
let accept_channelv2 = msgs::AcceptChannelV2 {
- temporary_channel_id: ChannelId::from_bytes([2; 32]),
+ common_fields: CommonAcceptChannelFields {
+ temporary_channel_id: ChannelId::from_bytes([2; 32]),
+ dust_limit_satoshis: 1311768467284833366,
+ max_htlc_value_in_flight_msat: 2536655962884945560,
+ htlc_minimum_msat: 2316138423780173,
+ minimum_depth: 821716,
+ to_self_delay: 49340,
+ max_accepted_htlcs: 49340,
+ funding_pubkey: pubkey_1,
+ revocation_basepoint: pubkey_2,
+ payment_basepoint: pubkey_3,
+ delayed_payment_basepoint: pubkey_4,
+ htlc_basepoint: pubkey_5,
+ first_per_commitment_point: pubkey_6,
+ shutdown_scriptpubkey: if shutdown { Some(Address::p2pkh(&::bitcoin::PublicKey{compressed: true, inner: pubkey_1}, Network::Testnet).script_pubkey()) } else { None },
+ channel_type: None,
+ },
funding_satoshis: 1311768467284833366,
- dust_limit_satoshis: 1311768467284833366,
- max_htlc_value_in_flight_msat: 2536655962884945560,
- htlc_minimum_msat: 2316138423780173,
- minimum_depth: 821716,
- to_self_delay: 49340,
- max_accepted_htlcs: 49340,
- funding_pubkey: pubkey_1,
- revocation_basepoint: pubkey_2,
- payment_basepoint: pubkey_3,
- delayed_payment_basepoint: pubkey_4,
- htlc_basepoint: pubkey_5,
- first_per_commitment_point: pubkey_6,
second_per_commitment_point: pubkey_7,
- shutdown_scriptpubkey: if shutdown { Some(Address::p2pkh(&::bitcoin::PublicKey{compressed: true, inner: pubkey_1}, Network::Testnet).script_pubkey()) } else { None },
- channel_type: None,
require_confirmed_inputs: None,
};
let encoded_value = accept_channelv2.encode();
target_value.append(&mut <Vec<u8>>::from_hex("03f006a18d5653c4edf5391ff23a61f03ff83d237e880ee61187fa9f379a028e0a").unwrap()); // first_per_commitment_point
target_value.append(&mut <Vec<u8>>::from_hex("02989c0b76cb563971fdc9bef31ec06c3560f3249d6ee9e5d83c57625596e05f6f").unwrap()); // second_per_commitment_point
if shutdown {
- target_value.append(&mut <Vec<u8>>::from_hex("001b").unwrap()); // Type 0 + Length 27
target_value.append(&mut <Vec<u8>>::from_hex("001976a91479b000887626b294a914501a4cd226b58b23598388ac").unwrap());
}
assert_eq!(encoded_value, target_value);
#[test]
fn encoding_tx_signatures() {
+ let secp_ctx = Secp256k1::new();
+ let (privkey_1, _) = get_keys_from!("0101010101010101010101010101010101010101010101010101010101010101", secp_ctx);
+ let sig_1 = get_sig_on!(privkey_1, secp_ctx, String::from("01010101010101010101010101010101"));
+
let tx_signatures = msgs::TxSignatures {
channel_id: ChannelId::from_bytes([2; 32]),
tx_hash: Txid::from_str("c2d4449afa8d26140898dd54d3390b057ba2a5afcf03ba29d7dc0d8b9ffe966e").unwrap(),
<Vec<u8>>::from_hex("3045022100ee00dbf4a862463e837d7c08509de814d620e4d9830fa84818713e0fa358f145022021c3c7060c4d53fe84fd165d60208451108a778c13b92ca4c6bad439236126cc01").unwrap(),
<Vec<u8>>::from_hex("028fbbf0b16f5ba5bcb5dd37cd4047ce6f726a21c06682f9ec2f52b057de1dbdb5").unwrap()]),
],
+ funding_outpoint_sig: Some(sig_1),
};
let encoded_value = tx_signatures.encode();
let mut target_value = <Vec<u8>>::from_hex("0202020202020202020202020202020202020202020202020202020202020202").unwrap(); // channel_id
target_value.append(&mut <Vec<u8>>::from_hex("3045022100ee00dbf4a862463e837d7c08509de814d620e4d9830fa84818713e0fa358f145022021c3c7060c4d53fe84fd165d60208451108a778c13b92ca4c6bad439236126cc01").unwrap());
target_value.append(&mut <Vec<u8>>::from_hex("21").unwrap()); // len of witness element data (VarInt)
target_value.append(&mut <Vec<u8>>::from_hex("028fbbf0b16f5ba5bcb5dd37cd4047ce6f726a21c06682f9ec2f52b057de1dbdb5").unwrap());
+ target_value.append(&mut <Vec<u8>>::from_hex("0040").unwrap()); // type and len (64)
+ target_value.append(&mut <Vec<u8>>::from_hex("d977cb9b53d93a6ff64bb5f1e158b4094b66e798fb12911168a3ccdf80a83096340a6a95da0ae8d9f776528eecdbb747eb6b545495a4319ed5378e35b21e073a").unwrap());
assert_eq!(encoded_value, target_value);
}
} else { panic!(); }
}
+ #[test]
+ fn encoding_final_onion_hop_data_with_trampoline_packet() {
+ let secp_ctx = Secp256k1::new();
+ let (_private_key, public_key) = get_keys_from!("0101010101010101010101010101010101010101010101010101010101010101", secp_ctx);
+
+ let compressed_public_key = public_key.serialize();
+ assert_eq!(compressed_public_key.len(), 33);
+
+ let trampoline_packet = TrampolineOnionPacket {
+ version: 0,
+ public_key,
+ hop_data: vec![1; 650], // this should be the standard encoded length
+ hmac: [2; 32],
+ };
+ let encoded_trampoline_packet = trampoline_packet.encode();
+ assert_eq!(encoded_trampoline_packet.len(), 716);
+
+ let msg = msgs::OutboundOnionPayload::TrampolineEntrypoint {
+ multipath_trampoline_data: None,
+ amt_to_forward: 0x0badf00d01020304,
+ outgoing_cltv_value: 0xffffffff,
+ trampoline_packet,
+ };
+ let encoded_payload = msg.encode();
+
+ let trampoline_type_bytes = &encoded_payload[19..=19];
+ let mut trampoline_type_cursor = Cursor::new(trampoline_type_bytes);
+ let trampoline_type_big_size: BigSize = Readable::read(&mut trampoline_type_cursor).unwrap();
+ assert_eq!(trampoline_type_big_size.0, 20);
+
+ let trampoline_length_bytes = &encoded_payload[20..=22];
+ let mut trampoline_length_cursor = Cursor::new(trampoline_length_bytes);
+ let trampoline_length_big_size: BigSize = Readable::read(&mut trampoline_length_cursor).unwrap();
+ assert_eq!(trampoline_length_big_size.0, encoded_trampoline_packet.len() as u64);
+ }
+
+ #[test]
+ fn encoding_final_onion_hop_data_with_eclair_trampoline_packet() {
+ let public_key = PublicKey::from_slice(&<Vec<u8>>::from_hex("02eec7245d6b7d2ccb30380bfbe2a3648cd7a942653f5aa340edcea1f283686619").unwrap()).unwrap();
+ let hop_data = <Vec<u8>>::from_hex("cff34152f3a36e52ca94e74927203a560392b9cc7ce3c45809c6be52166c24a595716880f95f178bf5b30ca63141f74db6e92795c6130877cfdac3d4bd3087ee73c65d627ddd709112a848cc99e303f3706509aa43ba7c8a88cba175fccf9a8f5016ef06d3b935dbb15196d7ce16dc1a7157845566901d7b2197e52cab4ce487014b14816e5805f9fcacb4f8f88b8ff176f1b94f6ce6b00bc43221130c17d20ef629db7c5f7eafaa166578c720619561dd14b3277db557ec7dcdb793771aef0f2f667cfdbeae3ac8d331c5994779dffb31e5fc0dbdedc0c592ca6d21c18e47fe3528d6975c19517d7e2ea8c5391cf17d0fe30c80913ed887234ccb48808f7ef9425bcd815c3b586210979e3bb286ef2851bf9ce04e28c40a203df98fd648d2f1936fd2f1def0e77eecb277229b4b682322371c0a1dbfcd723a991993df8cc1f2696b84b055b40a1792a29f710295a18fbd351b0f3ff34cd13941131b8278ba79303c89117120eea691738a9954908195143b039dbeed98f26a92585f3d15cf742c953799d3272e0545e9b744be9d3b4c").unwrap();
+ let hmac_vector = <Vec<u8>>::from_hex("bb079bfc4b35190eee9f59a1d7b41ba2f773179f322dafb4b1af900c289ebd6c").unwrap();
+ let mut hmac = [0; 32];
+ hmac.copy_from_slice(&hmac_vector);
+
+ let compressed_public_key = public_key.serialize();
+ assert_eq!(compressed_public_key.len(), 33);
+
+ let trampoline_packet = TrampolineOnionPacket {
+ version: 0,
+ public_key,
+ hop_data,
+ hmac,
+ };
+ let encoded_trampoline_packet = trampoline_packet.encode();
+ let expected_eclair_trampoline_packet = <Vec<u8>>::from_hex("0002eec7245d6b7d2ccb30380bfbe2a3648cd7a942653f5aa340edcea1f283686619cff34152f3a36e52ca94e74927203a560392b9cc7ce3c45809c6be52166c24a595716880f95f178bf5b30ca63141f74db6e92795c6130877cfdac3d4bd3087ee73c65d627ddd709112a848cc99e303f3706509aa43ba7c8a88cba175fccf9a8f5016ef06d3b935dbb15196d7ce16dc1a7157845566901d7b2197e52cab4ce487014b14816e5805f9fcacb4f8f88b8ff176f1b94f6ce6b00bc43221130c17d20ef629db7c5f7eafaa166578c720619561dd14b3277db557ec7dcdb793771aef0f2f667cfdbeae3ac8d331c5994779dffb31e5fc0dbdedc0c592ca6d21c18e47fe3528d6975c19517d7e2ea8c5391cf17d0fe30c80913ed887234ccb48808f7ef9425bcd815c3b586210979e3bb286ef2851bf9ce04e28c40a203df98fd648d2f1936fd2f1def0e77eecb277229b4b682322371c0a1dbfcd723a991993df8cc1f2696b84b055b40a1792a29f710295a18fbd351b0f3ff34cd13941131b8278ba79303c89117120eea691738a9954908195143b039dbeed98f26a92585f3d15cf742c953799d3272e0545e9b744be9d3b4cbb079bfc4b35190eee9f59a1d7b41ba2f773179f322dafb4b1af900c289ebd6c").unwrap();
+ assert_eq!(encoded_trampoline_packet, expected_eclair_trampoline_packet);
+ }
+
#[test]
fn query_channel_range_end_blocknum() {
let tests: Vec<(u32, u32, u32)> = vec![
--- /dev/null
+// This file is Copyright its original authors, visible in version control
+// history.
+//
+// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
+// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
+// You may not use this file except in accordance with one or both of these
+// licenses.
+
+//! Functional tests for the BOLT 12 Offers payment flow.
+//!
+//! [`ChannelManager`] provides utilities to create [`Offer`]s and [`Refund`]s along with utilities
+//! to initiate and request payment for them, respectively. It also manages the payment flow via
+//! implementing [`OffersMessageHandler`]. This module tests that functionality, including the
+//! resulting [`Event`] generation.
+//!
+//! Two-node success tests use an announced channel:
+//!
+//! Alice --- Bob
+//!
+//! While two-node failure tests use an unannounced channel:
+//!
+//! Alice ... Bob
+//!
+//! Six-node tests use unannounced channels for the sender and recipient and announced channels for
+//! the rest of the network.
+//!
+//! nodes[4]
+//! / \
+//! / \
+//! / \
+//! Alice ... Bob -------- Charlie ... David
+//! \ /
+//! \ /
+//! \ /
+//! nodes[5]
+//!
+//! Unnamed nodes are needed to ensure unannounced nodes can create two-hop blinded paths.
+//!
+//! Nodes without channels are disconnected and connected as needed to ensure that deterministic
+//! blinded paths are used.
+
+use bitcoin::network::constants::Network;
+use core::time::Duration;
+use crate::blinded_path::{BlindedPath, IntroductionNode};
+use crate::blinded_path::payment::{Bolt12OfferContext, Bolt12RefundContext, PaymentContext};
+use crate::events::{Event, MessageSendEventsProvider, PaymentPurpose};
+use crate::ln::channelmanager::{PaymentId, RecentPaymentDetails, Retry, self};
+use crate::ln::features::InvoiceRequestFeatures;
+use crate::ln::functional_test_utils::*;
+use crate::ln::msgs::{ChannelMessageHandler, Init, NodeAnnouncement, OnionMessage, OnionMessageHandler, RoutingMessageHandler, SocketAddress, UnsignedGossipMessage, UnsignedNodeAnnouncement};
+use crate::offers::invoice::Bolt12Invoice;
+use crate::offers::invoice_error::InvoiceError;
+use crate::offers::invoice_request::{InvoiceRequest, InvoiceRequestFields};
+use crate::offers::parse::Bolt12SemanticError;
+use crate::onion_message::messenger::PeeledOnion;
+use crate::onion_message::offers::OffersMessage;
+use crate::onion_message::packet::ParsedOnionMessageContents;
+use crate::routing::gossip::{NodeAlias, NodeId};
+use crate::sign::{NodeSigner, Recipient};
+
+use crate::prelude::*;
+
+macro_rules! expect_recent_payment {
+ ($node: expr, $payment_state: path, $payment_id: expr) => {
+ match $node.node.list_recent_payments().first() {
+ Some(&$payment_state { payment_id: actual_payment_id, .. }) => {
+ assert_eq!($payment_id, actual_payment_id);
+ },
+ Some(_) => panic!("Unexpected recent payment state"),
+ None => panic!("No recent payments"),
+ }
+ }
+}
+
+fn connect_peers<'a, 'b, 'c>(node_a: &Node<'a, 'b, 'c>, node_b: &Node<'a, 'b, 'c>) {
+ let node_id_a = node_a.node.get_our_node_id();
+ let node_id_b = node_b.node.get_our_node_id();
+
+ let init_a = Init {
+ features: node_a.init_features(&node_id_b),
+ networks: None,
+ remote_network_address: None,
+ };
+ let init_b = Init {
+ features: node_b.init_features(&node_id_a),
+ networks: None,
+ remote_network_address: None,
+ };
+
+ node_a.node.peer_connected(&node_id_b, &init_b, true).unwrap();
+ node_b.node.peer_connected(&node_id_a, &init_a, false).unwrap();
+ node_a.onion_messenger.peer_connected(&node_id_b, &init_b, true).unwrap();
+ node_b.onion_messenger.peer_connected(&node_id_a, &init_a, false).unwrap();
+}
+
+fn disconnect_peers<'a, 'b, 'c>(node_a: &Node<'a, 'b, 'c>, peers: &[&Node<'a, 'b, 'c>]) {
+ for node_b in peers {
+ node_a.node.peer_disconnected(&node_b.node.get_our_node_id());
+ node_b.node.peer_disconnected(&node_a.node.get_our_node_id());
+ node_a.onion_messenger.peer_disconnected(&node_b.node.get_our_node_id());
+ node_b.onion_messenger.peer_disconnected(&node_a.node.get_our_node_id());
+ }
+}
+
+fn announce_node_address<'a, 'b, 'c>(
+ node: &Node<'a, 'b, 'c>, peers: &[&Node<'a, 'b, 'c>], address: SocketAddress,
+) {
+ let features = node.onion_messenger.provided_node_features()
+ | node.gossip_sync.provided_node_features();
+ let rgb = [0u8; 3];
+ let announcement = UnsignedNodeAnnouncement {
+ features,
+ timestamp: 1000,
+ node_id: NodeId::from_pubkey(&node.keys_manager.get_node_id(Recipient::Node).unwrap()),
+ rgb,
+ alias: NodeAlias([0u8; 32]),
+ addresses: vec![address],
+ excess_address_data: Vec::new(),
+ excess_data: Vec::new(),
+ };
+ let signature = node.keys_manager.sign_gossip_message(
+ UnsignedGossipMessage::NodeAnnouncement(&announcement)
+ ).unwrap();
+
+ let msg = NodeAnnouncement {
+ signature,
+ contents: announcement
+ };
+
+ node.gossip_sync.handle_node_announcement(&msg).unwrap();
+ for peer in peers {
+ peer.gossip_sync.handle_node_announcement(&msg).unwrap();
+ }
+}
+
+fn route_bolt12_payment<'a, 'b, 'c>(
+ node: &Node<'a, 'b, 'c>, path: &[&Node<'a, 'b, 'c>], invoice: &Bolt12Invoice
+) {
+ // Monitor added when handling the invoice onion message.
+ check_added_monitors(node, 1);
+
+ let mut events = node.node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ let ev = remove_first_msg_event_to_node(&path[0].node.get_our_node_id(), &mut events);
+
+ // Use a fake payment_hash and bypass checking for the PaymentClaimable event since the
+ // invoice contains the payment_hash but it was encrypted inside an onion message.
+ let amount_msats = invoice.amount_msats();
+ let payment_hash = invoice.payment_hash();
+ let args = PassAlongPathArgs::new(node, path, amount_msats, payment_hash, ev)
+ .without_clearing_recipient_events();
+ do_pass_along_path(args);
+}
+
+fn claim_bolt12_payment<'a, 'b, 'c>(
+ node: &Node<'a, 'b, 'c>, path: &[&Node<'a, 'b, 'c>], expected_payment_context: PaymentContext
+) {
+ let recipient = &path[path.len() - 1];
+ let payment_purpose = match get_event!(recipient, Event::PaymentClaimable) {
+ Event::PaymentClaimable { purpose, .. } => purpose,
+ _ => panic!("No Event::PaymentClaimable"),
+ };
+ let payment_preimage = match payment_purpose.preimage() {
+ Some(preimage) => preimage,
+ None => panic!("No preimage in Event::PaymentClaimable"),
+ };
+ match payment_purpose {
+ PaymentPurpose::Bolt12OfferPayment { payment_context, .. } => {
+ assert_eq!(PaymentContext::Bolt12Offer(payment_context), expected_payment_context);
+ },
+ PaymentPurpose::Bolt12RefundPayment { payment_context, .. } => {
+ assert_eq!(PaymentContext::Bolt12Refund(payment_context), expected_payment_context);
+ },
+ _ => panic!("Unexpected payment purpose: {:?}", payment_purpose),
+ }
+ claim_payment(node, path, payment_preimage);
+}
+
+fn extract_invoice_request<'a, 'b, 'c>(
+ node: &Node<'a, 'b, 'c>, message: &OnionMessage
+) -> (InvoiceRequest, Option<BlindedPath>) {
+ match node.onion_messenger.peel_onion_message(message) {
+ Ok(PeeledOnion::Receive(message, _, reply_path)) => match message {
+ ParsedOnionMessageContents::Offers(offers_message) => match offers_message {
+ OffersMessage::InvoiceRequest(invoice_request) => (invoice_request, reply_path),
+ OffersMessage::Invoice(invoice) => panic!("Unexpected invoice: {:?}", invoice),
+ OffersMessage::InvoiceError(error) => panic!("Unexpected invoice_error: {:?}", error),
+ },
+ ParsedOnionMessageContents::Custom(message) => panic!("Unexpected custom message: {:?}", message),
+ },
+ Ok(PeeledOnion::Forward(_, _)) => panic!("Unexpected onion message forward"),
+ Err(e) => panic!("Failed to process onion message {:?}", e),
+ }
+}
+
+fn extract_invoice<'a, 'b, 'c>(node: &Node<'a, 'b, 'c>, message: &OnionMessage) -> Bolt12Invoice {
+ match node.onion_messenger.peel_onion_message(message) {
+ Ok(PeeledOnion::Receive(message, _, _)) => match message {
+ ParsedOnionMessageContents::Offers(offers_message) => match offers_message {
+ OffersMessage::InvoiceRequest(invoice_request) => panic!("Unexpected invoice_request: {:?}", invoice_request),
+ OffersMessage::Invoice(invoice) => invoice,
+ OffersMessage::InvoiceError(error) => panic!("Unexpected invoice_error: {:?}", error),
+ },
+ ParsedOnionMessageContents::Custom(message) => panic!("Unexpected custom message: {:?}", message),
+ },
+ Ok(PeeledOnion::Forward(_, _)) => panic!("Unexpected onion message forward"),
+ Err(e) => panic!("Failed to process onion message {:?}", e),
+ }
+}
+
+fn extract_invoice_error<'a, 'b, 'c>(
+ node: &Node<'a, 'b, 'c>, message: &OnionMessage
+) -> InvoiceError {
+ match node.onion_messenger.peel_onion_message(message) {
+ Ok(PeeledOnion::Receive(message, _, _)) => match message {
+ ParsedOnionMessageContents::Offers(offers_message) => match offers_message {
+ OffersMessage::InvoiceRequest(invoice_request) => panic!("Unexpected invoice_request: {:?}", invoice_request),
+ OffersMessage::Invoice(invoice) => panic!("Unexpected invoice: {:?}", invoice),
+ OffersMessage::InvoiceError(error) => error,
+ },
+ ParsedOnionMessageContents::Custom(message) => panic!("Unexpected custom message: {:?}", message),
+ },
+ Ok(PeeledOnion::Forward(_, _)) => panic!("Unexpected onion message forward"),
+ Err(e) => panic!("Failed to process onion message {:?}", e),
+ }
+}
+
+/// Checks that blinded paths without Tor-only nodes are preferred when constructing an offer.
+#[test]
+fn prefers_non_tor_nodes_in_blinded_paths() {
+ let mut accept_forward_cfg = test_default_channel_config();
+ accept_forward_cfg.accept_forwards_to_priv_channels = true;
+
+ let mut features = channelmanager::provided_init_features(&accept_forward_cfg);
+ features.set_onion_messages_optional();
+ features.set_route_blinding_optional();
+
+ let chanmon_cfgs = create_chanmon_cfgs(6);
+ let node_cfgs = create_node_cfgs(6, &chanmon_cfgs);
+
+ *node_cfgs[1].override_init_features.borrow_mut() = Some(features);
+
+ let node_chanmgrs = create_node_chanmgrs(
+ 6, &node_cfgs, &[None, Some(accept_forward_cfg), None, None, None, None]
+ );
+ let nodes = create_network(6, &node_cfgs, &node_chanmgrs);
+
+ create_unannounced_chan_between_nodes_with_value(&nodes, 0, 1, 10_000_000, 1_000_000_000);
+ create_unannounced_chan_between_nodes_with_value(&nodes, 2, 3, 10_000_000, 1_000_000_000);
+ create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 10_000_000, 1_000_000_000);
+ create_announced_chan_between_nodes_with_value(&nodes, 1, 4, 10_000_000, 1_000_000_000);
+ create_announced_chan_between_nodes_with_value(&nodes, 1, 5, 10_000_000, 1_000_000_000);
+ create_announced_chan_between_nodes_with_value(&nodes, 2, 4, 10_000_000, 1_000_000_000);
+ create_announced_chan_between_nodes_with_value(&nodes, 2, 5, 10_000_000, 1_000_000_000);
+
+	// Add an extra channel so that more than one of Bob's peers has MIN_PEER_CHANNELS.
+ create_announced_chan_between_nodes_with_value(&nodes, 4, 5, 10_000_000, 1_000_000_000);
+
+ let (alice, bob, charlie, david) = (&nodes[0], &nodes[1], &nodes[2], &nodes[3]);
+ let bob_id = bob.node.get_our_node_id();
+ let charlie_id = charlie.node.get_our_node_id();
+
+ disconnect_peers(alice, &[charlie, david, &nodes[4], &nodes[5]]);
+ disconnect_peers(david, &[bob, &nodes[4], &nodes[5]]);
+
+ let tor = SocketAddress::OnionV2([255, 254, 253, 252, 251, 250, 249, 248, 247, 246, 38, 7]);
+ announce_node_address(charlie, &[alice, bob, david, &nodes[4], &nodes[5]], tor.clone());
+
+ let offer = bob.node
+ .create_offer_builder("coffee".to_string()).unwrap()
+ .amount_msats(10_000_000)
+ .build().unwrap();
+ assert_ne!(offer.signing_pubkey(), bob_id);
+ assert!(!offer.paths().is_empty());
+ for path in offer.paths() {
+ assert_ne!(path.introduction_node, IntroductionNode::NodeId(bob_id));
+ assert_ne!(path.introduction_node, IntroductionNode::NodeId(charlie_id));
+ }
+
+ // Use a one-hop blinded path when Bob is announced and all his peers are Tor-only.
+ announce_node_address(&nodes[4], &[alice, bob, charlie, david, &nodes[5]], tor.clone());
+ announce_node_address(&nodes[5], &[alice, bob, charlie, david, &nodes[4]], tor.clone());
+
+ let offer = bob.node
+ .create_offer_builder("coffee".to_string()).unwrap()
+ .amount_msats(10_000_000)
+ .build().unwrap();
+ assert_ne!(offer.signing_pubkey(), bob_id);
+ assert!(!offer.paths().is_empty());
+ for path in offer.paths() {
+ assert_eq!(path.introduction_node, IntroductionNode::NodeId(bob_id));
+ }
+}
+
+/// Checks that blinded paths prefer an introduction node that is the most connected.
+#[test]
+fn prefers_more_connected_nodes_in_blinded_paths() {
+ let mut accept_forward_cfg = test_default_channel_config();
+ accept_forward_cfg.accept_forwards_to_priv_channels = true;
+
+ let mut features = channelmanager::provided_init_features(&accept_forward_cfg);
+ features.set_onion_messages_optional();
+ features.set_route_blinding_optional();
+
+ let chanmon_cfgs = create_chanmon_cfgs(6);
+ let node_cfgs = create_node_cfgs(6, &chanmon_cfgs);
+
+ *node_cfgs[1].override_init_features.borrow_mut() = Some(features);
+
+ let node_chanmgrs = create_node_chanmgrs(
+ 6, &node_cfgs, &[None, Some(accept_forward_cfg), None, None, None, None]
+ );
+ let nodes = create_network(6, &node_cfgs, &node_chanmgrs);
+
+ create_unannounced_chan_between_nodes_with_value(&nodes, 0, 1, 10_000_000, 1_000_000_000);
+ create_unannounced_chan_between_nodes_with_value(&nodes, 2, 3, 10_000_000, 1_000_000_000);
+ create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 10_000_000, 1_000_000_000);
+ create_announced_chan_between_nodes_with_value(&nodes, 1, 4, 10_000_000, 1_000_000_000);
+ create_announced_chan_between_nodes_with_value(&nodes, 1, 5, 10_000_000, 1_000_000_000);
+ create_announced_chan_between_nodes_with_value(&nodes, 2, 4, 10_000_000, 1_000_000_000);
+ create_announced_chan_between_nodes_with_value(&nodes, 2, 5, 10_000_000, 1_000_000_000);
+
+	// Add extra channels so that more than one of Bob's peers has MIN_PEER_CHANNELS and one has
+	// more than the others.
+ create_announced_chan_between_nodes_with_value(&nodes, 0, 4, 10_000_000, 1_000_000_000);
+ create_announced_chan_between_nodes_with_value(&nodes, 3, 4, 10_000_000, 1_000_000_000);
+
+ let (alice, bob, charlie, david) = (&nodes[0], &nodes[1], &nodes[2], &nodes[3]);
+ let bob_id = bob.node.get_our_node_id();
+
+ disconnect_peers(alice, &[charlie, david, &nodes[4], &nodes[5]]);
+ disconnect_peers(david, &[bob, &nodes[4], &nodes[5]]);
+
+ let offer = bob.node
+ .create_offer_builder("coffee".to_string()).unwrap()
+ .amount_msats(10_000_000)
+ .build().unwrap();
+ assert_ne!(offer.signing_pubkey(), bob_id);
+ assert!(!offer.paths().is_empty());
+ for path in offer.paths() {
+ assert_eq!(path.introduction_node, IntroductionNode::NodeId(nodes[4].node.get_our_node_id()));
+ }
+}
+
+/// Checks that an offer can be paid through blinded paths and that ephemeral pubkeys are used
+/// rather than exposing a node's pubkey.
+#[test]
+fn creates_and_pays_for_offer_using_two_hop_blinded_path() {
+ let mut accept_forward_cfg = test_default_channel_config();
+ accept_forward_cfg.accept_forwards_to_priv_channels = true;
+
+ let mut features = channelmanager::provided_init_features(&accept_forward_cfg);
+ features.set_onion_messages_optional();
+ features.set_route_blinding_optional();
+
+ let chanmon_cfgs = create_chanmon_cfgs(6);
+ let node_cfgs = create_node_cfgs(6, &chanmon_cfgs);
+
+ *node_cfgs[1].override_init_features.borrow_mut() = Some(features);
+
+ let node_chanmgrs = create_node_chanmgrs(
+ 6, &node_cfgs, &[None, Some(accept_forward_cfg), None, None, None, None]
+ );
+ let nodes = create_network(6, &node_cfgs, &node_chanmgrs);
+
+ create_unannounced_chan_between_nodes_with_value(&nodes, 0, 1, 10_000_000, 1_000_000_000);
+ create_unannounced_chan_between_nodes_with_value(&nodes, 2, 3, 10_000_000, 1_000_000_000);
+ create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 10_000_000, 1_000_000_000);
+ create_announced_chan_between_nodes_with_value(&nodes, 1, 4, 10_000_000, 1_000_000_000);
+ create_announced_chan_between_nodes_with_value(&nodes, 1, 5, 10_000_000, 1_000_000_000);
+ create_announced_chan_between_nodes_with_value(&nodes, 2, 4, 10_000_000, 1_000_000_000);
+ create_announced_chan_between_nodes_with_value(&nodes, 2, 5, 10_000_000, 1_000_000_000);
+
+ let (alice, bob, charlie, david) = (&nodes[0], &nodes[1], &nodes[2], &nodes[3]);
+ let alice_id = alice.node.get_our_node_id();
+ let bob_id = bob.node.get_our_node_id();
+ let charlie_id = charlie.node.get_our_node_id();
+ let david_id = david.node.get_our_node_id();
+
+ disconnect_peers(alice, &[charlie, david, &nodes[4], &nodes[5]]);
+ disconnect_peers(david, &[bob, &nodes[4], &nodes[5]]);
+
+ let offer = alice.node
+ .create_offer_builder("coffee".to_string())
+ .unwrap()
+ .amount_msats(10_000_000)
+ .build().unwrap();
+ assert_ne!(offer.signing_pubkey(), alice_id);
+ assert!(!offer.paths().is_empty());
+ for path in offer.paths() {
+ assert_eq!(path.introduction_node, IntroductionNode::NodeId(bob_id));
+ }
+
+ let payment_id = PaymentId([1; 32]);
+ david.node.pay_for_offer(&offer, None, None, None, payment_id, Retry::Attempts(0), None)
+ .unwrap();
+ expect_recent_payment!(david, RecentPaymentDetails::AwaitingInvoice, payment_id);
+
+ connect_peers(david, bob);
+
+ let onion_message = david.onion_messenger.next_onion_message_for_peer(bob_id).unwrap();
+ bob.onion_messenger.handle_onion_message(&david_id, &onion_message);
+
+ connect_peers(alice, charlie);
+
+ let onion_message = bob.onion_messenger.next_onion_message_for_peer(alice_id).unwrap();
+ alice.onion_messenger.handle_onion_message(&bob_id, &onion_message);
+
+ let (invoice_request, reply_path) = extract_invoice_request(alice, &onion_message);
+ let payment_context = PaymentContext::Bolt12Offer(Bolt12OfferContext {
+ offer_id: offer.id(),
+ invoice_request: InvoiceRequestFields {
+ payer_id: invoice_request.payer_id(),
+ amount_msats: None,
+ features: InvoiceRequestFeatures::empty(),
+ quantity: None,
+ payer_note_truncated: None,
+ },
+ });
+ assert_eq!(invoice_request.amount_msats(), None);
+ assert_ne!(invoice_request.payer_id(), david_id);
+ assert_eq!(reply_path.unwrap().introduction_node, IntroductionNode::NodeId(charlie_id));
+
+ let onion_message = alice.onion_messenger.next_onion_message_for_peer(charlie_id).unwrap();
+ charlie.onion_messenger.handle_onion_message(&alice_id, &onion_message);
+
+ let onion_message = charlie.onion_messenger.next_onion_message_for_peer(david_id).unwrap();
+ david.onion_messenger.handle_onion_message(&charlie_id, &onion_message);
+
+ let invoice = extract_invoice(david, &onion_message);
+ assert_eq!(invoice.amount_msats(), 10_000_000);
+ assert_ne!(invoice.signing_pubkey(), alice_id);
+ assert!(!invoice.payment_paths().is_empty());
+ for (_, path) in invoice.payment_paths() {
+ assert_eq!(path.introduction_node, IntroductionNode::NodeId(bob_id));
+ }
+
+ route_bolt12_payment(david, &[charlie, bob, alice], &invoice);
+ expect_recent_payment!(david, RecentPaymentDetails::Pending, payment_id);
+
+ claim_bolt12_payment(david, &[charlie, bob, alice], payment_context);
+ expect_recent_payment!(david, RecentPaymentDetails::Fulfilled, payment_id);
+}
+
+/// Checks that a refund can be paid through blinded paths and that ephemeral pubkeys are used
+/// rather than exposing a node's pubkey.
+#[test]
+fn creates_and_pays_for_refund_using_two_hop_blinded_path() {
+ let mut accept_forward_cfg = test_default_channel_config();
+ accept_forward_cfg.accept_forwards_to_priv_channels = true;
+
+ let mut features = channelmanager::provided_init_features(&accept_forward_cfg);
+ features.set_onion_messages_optional();
+ features.set_route_blinding_optional();
+
+ let chanmon_cfgs = create_chanmon_cfgs(6);
+ let node_cfgs = create_node_cfgs(6, &chanmon_cfgs);
+
+ *node_cfgs[1].override_init_features.borrow_mut() = Some(features);
+
+ let node_chanmgrs = create_node_chanmgrs(
+ 6, &node_cfgs, &[None, Some(accept_forward_cfg), None, None, None, None]
+ );
+ let nodes = create_network(6, &node_cfgs, &node_chanmgrs);
+
+ create_unannounced_chan_between_nodes_with_value(&nodes, 0, 1, 10_000_000, 1_000_000_000);
+ create_unannounced_chan_between_nodes_with_value(&nodes, 2, 3, 10_000_000, 1_000_000_000);
+ create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 10_000_000, 1_000_000_000);
+ create_announced_chan_between_nodes_with_value(&nodes, 1, 4, 10_000_000, 1_000_000_000);
+ create_announced_chan_between_nodes_with_value(&nodes, 1, 5, 10_000_000, 1_000_000_000);
+ create_announced_chan_between_nodes_with_value(&nodes, 2, 4, 10_000_000, 1_000_000_000);
+ create_announced_chan_between_nodes_with_value(&nodes, 2, 5, 10_000_000, 1_000_000_000);
+
+ let (alice, bob, charlie, david) = (&nodes[0], &nodes[1], &nodes[2], &nodes[3]);
+ let alice_id = alice.node.get_our_node_id();
+ let bob_id = bob.node.get_our_node_id();
+ let charlie_id = charlie.node.get_our_node_id();
+ let david_id = david.node.get_our_node_id();
+
+ disconnect_peers(alice, &[charlie, david, &nodes[4], &nodes[5]]);
+ disconnect_peers(david, &[bob, &nodes[4], &nodes[5]]);
+
+ let absolute_expiry = Duration::from_secs(u64::MAX);
+ let payment_id = PaymentId([1; 32]);
+ let refund = david.node
+ .create_refund_builder(
+ "refund".to_string(), 10_000_000, absolute_expiry, payment_id, Retry::Attempts(0), None
+ )
+ .unwrap()
+ .build().unwrap();
+ assert_eq!(refund.amount_msats(), 10_000_000);
+ assert_eq!(refund.absolute_expiry(), Some(absolute_expiry));
+ assert_ne!(refund.payer_id(), david_id);
+ assert!(!refund.paths().is_empty());
+ for path in refund.paths() {
+ assert_eq!(path.introduction_node, IntroductionNode::NodeId(charlie_id));
+ }
+ expect_recent_payment!(david, RecentPaymentDetails::AwaitingInvoice, payment_id);
+
+ let payment_context = PaymentContext::Bolt12Refund(Bolt12RefundContext {});
+ let expected_invoice = alice.node.request_refund_payment(&refund).unwrap();
+
+ connect_peers(alice, charlie);
+
+ let onion_message = alice.onion_messenger.next_onion_message_for_peer(charlie_id).unwrap();
+ charlie.onion_messenger.handle_onion_message(&alice_id, &onion_message);
+
+ let onion_message = charlie.onion_messenger.next_onion_message_for_peer(david_id).unwrap();
+ david.onion_messenger.handle_onion_message(&charlie_id, &onion_message);
+
+ let invoice = extract_invoice(david, &onion_message);
+ assert_eq!(invoice, expected_invoice);
+
+ assert_eq!(invoice.amount_msats(), 10_000_000);
+ assert_ne!(invoice.signing_pubkey(), alice_id);
+ assert!(!invoice.payment_paths().is_empty());
+ for (_, path) in invoice.payment_paths() {
+ assert_eq!(path.introduction_node, IntroductionNode::NodeId(bob_id));
+ }
+
+ route_bolt12_payment(david, &[charlie, bob, alice], &invoice);
+ expect_recent_payment!(david, RecentPaymentDetails::Pending, payment_id);
+
+ claim_bolt12_payment(david, &[charlie, bob, alice], payment_context);
+ expect_recent_payment!(david, RecentPaymentDetails::Fulfilled, payment_id);
+}
+
+/// Checks that an offer can be paid through a one-hop blinded path and that ephemeral pubkeys are
+/// used rather than exposing a node's pubkey. However, the node's pubkey is still used as the
+/// introduction node of the blinded path.
+#[test]
+fn creates_and_pays_for_offer_using_one_hop_blinded_path() {
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+ let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+ create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 10_000_000, 1_000_000_000);
+
+ let alice = &nodes[0];
+ let alice_id = alice.node.get_our_node_id();
+ let bob = &nodes[1];
+ let bob_id = bob.node.get_our_node_id();
+
+ let offer = alice.node
+ .create_offer_builder("coffee".to_string()).unwrap()
+ .amount_msats(10_000_000)
+ .build().unwrap();
+ assert_ne!(offer.signing_pubkey(), alice_id);
+ assert!(!offer.paths().is_empty());
+ for path in offer.paths() {
+ assert_eq!(path.introduction_node, IntroductionNode::NodeId(alice_id));
+ }
+
+ let payment_id = PaymentId([1; 32]);
+ bob.node.pay_for_offer(&offer, None, None, None, payment_id, Retry::Attempts(0), None).unwrap();
+ expect_recent_payment!(bob, RecentPaymentDetails::AwaitingInvoice, payment_id);
+
+ let onion_message = bob.onion_messenger.next_onion_message_for_peer(alice_id).unwrap();
+ alice.onion_messenger.handle_onion_message(&bob_id, &onion_message);
+
+ let (invoice_request, reply_path) = extract_invoice_request(alice, &onion_message);
+ let payment_context = PaymentContext::Bolt12Offer(Bolt12OfferContext {
+ offer_id: offer.id(),
+ invoice_request: InvoiceRequestFields {
+ payer_id: invoice_request.payer_id(),
+ amount_msats: None,
+ features: InvoiceRequestFeatures::empty(),
+ quantity: None,
+ payer_note_truncated: None,
+ },
+ });
+ assert_eq!(invoice_request.amount_msats(), None);
+ assert_ne!(invoice_request.payer_id(), bob_id);
+ assert_eq!(reply_path.unwrap().introduction_node, IntroductionNode::NodeId(bob_id));
+
+ let onion_message = alice.onion_messenger.next_onion_message_for_peer(bob_id).unwrap();
+ bob.onion_messenger.handle_onion_message(&alice_id, &onion_message);
+
+ let invoice = extract_invoice(bob, &onion_message);
+ assert_eq!(invoice.amount_msats(), 10_000_000);
+ assert_ne!(invoice.signing_pubkey(), alice_id);
+ assert!(!invoice.payment_paths().is_empty());
+ for (_, path) in invoice.payment_paths() {
+ assert_eq!(path.introduction_node, IntroductionNode::NodeId(alice_id));
+ }
+
+ route_bolt12_payment(bob, &[alice], &invoice);
+ expect_recent_payment!(bob, RecentPaymentDetails::Pending, payment_id);
+
+ claim_bolt12_payment(bob, &[alice], payment_context);
+ expect_recent_payment!(bob, RecentPaymentDetails::Fulfilled, payment_id);
+}
+
+/// Checks that a refund can be paid through a one-hop blinded path and that ephemeral pubkeys are
+/// used rather than exposing a node's pubkey. However, the node's pubkey is still used as the
+/// introduction node of the blinded path.
+#[test]
+fn creates_and_pays_for_refund_using_one_hop_blinded_path() {
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+ let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+ create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 10_000_000, 1_000_000_000);
+
+ let alice = &nodes[0];
+ let alice_id = alice.node.get_our_node_id();
+ let bob = &nodes[1];
+ let bob_id = bob.node.get_our_node_id();
+
+ let absolute_expiry = Duration::from_secs(u64::MAX);
+ let payment_id = PaymentId([1; 32]);
+ let refund = bob.node
+ .create_refund_builder(
+ "refund".to_string(), 10_000_000, absolute_expiry, payment_id, Retry::Attempts(0), None
+ )
+ .unwrap()
+ .build().unwrap();
+ assert_eq!(refund.amount_msats(), 10_000_000);
+ assert_eq!(refund.absolute_expiry(), Some(absolute_expiry));
+ assert_ne!(refund.payer_id(), bob_id);
+ assert!(!refund.paths().is_empty());
+ for path in refund.paths() {
+ assert_eq!(path.introduction_node, IntroductionNode::NodeId(bob_id));
+ }
+ expect_recent_payment!(bob, RecentPaymentDetails::AwaitingInvoice, payment_id);
+
+ let payment_context = PaymentContext::Bolt12Refund(Bolt12RefundContext {});
+ let expected_invoice = alice.node.request_refund_payment(&refund).unwrap();
+
+ let onion_message = alice.onion_messenger.next_onion_message_for_peer(bob_id).unwrap();
+ bob.onion_messenger.handle_onion_message(&alice_id, &onion_message);
+
+ let invoice = extract_invoice(bob, &onion_message);
+ assert_eq!(invoice, expected_invoice);
+
+ assert_eq!(invoice.amount_msats(), 10_000_000);
+ assert_ne!(invoice.signing_pubkey(), alice_id);
+ assert!(!invoice.payment_paths().is_empty());
+ for (_, path) in invoice.payment_paths() {
+ assert_eq!(path.introduction_node, IntroductionNode::NodeId(alice_id));
+ }
+
+ route_bolt12_payment(bob, &[alice], &invoice);
+ expect_recent_payment!(bob, RecentPaymentDetails::Pending, payment_id);
+
+ claim_bolt12_payment(bob, &[alice], payment_context);
+ expect_recent_payment!(bob, RecentPaymentDetails::Fulfilled, payment_id);
+}
+
+/// Checks that an invoice for an offer without any blinded paths can be requested. Note that while
+/// the request is sent directly using the node's pubkey, the response and the payment still use
+/// blinded paths as required by the spec.
+#[test]
+fn pays_for_offer_without_blinded_paths() {
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+ let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+ create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 10_000_000, 1_000_000_000);
+
+ let alice = &nodes[0];
+ let alice_id = alice.node.get_our_node_id();
+ let bob = &nodes[1];
+ let bob_id = bob.node.get_our_node_id();
+
+ let offer = alice.node
+ .create_offer_builder("coffee".to_string()).unwrap()
+ .clear_paths()
+ .amount_msats(10_000_000)
+ .build().unwrap();
+ assert_eq!(offer.signing_pubkey(), alice_id);
+ assert!(offer.paths().is_empty());
+
+ let payment_id = PaymentId([1; 32]);
+ bob.node.pay_for_offer(&offer, None, None, None, payment_id, Retry::Attempts(0), None).unwrap();
+ expect_recent_payment!(bob, RecentPaymentDetails::AwaitingInvoice, payment_id);
+
+ let onion_message = bob.onion_messenger.next_onion_message_for_peer(alice_id).unwrap();
+ alice.onion_messenger.handle_onion_message(&bob_id, &onion_message);
+
+ let (invoice_request, _) = extract_invoice_request(alice, &onion_message);
+ let payment_context = PaymentContext::Bolt12Offer(Bolt12OfferContext {
+ offer_id: offer.id(),
+ invoice_request: InvoiceRequestFields {
+ payer_id: invoice_request.payer_id(),
+ amount_msats: None,
+ features: InvoiceRequestFeatures::empty(),
+ quantity: None,
+ payer_note_truncated: None,
+ },
+ });
+
+ let onion_message = alice.onion_messenger.next_onion_message_for_peer(bob_id).unwrap();
+ bob.onion_messenger.handle_onion_message(&alice_id, &onion_message);
+
+ let invoice = extract_invoice(bob, &onion_message);
+ route_bolt12_payment(bob, &[alice], &invoice);
+ expect_recent_payment!(bob, RecentPaymentDetails::Pending, payment_id);
+
+ claim_bolt12_payment(bob, &[alice], payment_context);
+ expect_recent_payment!(bob, RecentPaymentDetails::Fulfilled, payment_id);
+}
+
+/// Checks that a refund without any blinded paths can be paid. Note that while the invoice is sent
+/// directly using the node's pubkey, the payment still uses blinded paths as required by the spec.
+#[test]
+fn pays_for_refund_without_blinded_paths() {
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+ let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+ create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 10_000_000, 1_000_000_000);
+
+ let alice = &nodes[0];
+ let alice_id = alice.node.get_our_node_id();
+ let bob = &nodes[1];
+ let bob_id = bob.node.get_our_node_id();
+
+ let absolute_expiry = Duration::from_secs(u64::MAX);
+ let payment_id = PaymentId([1; 32]);
+ let refund = bob.node
+ .create_refund_builder(
+ "refund".to_string(), 10_000_000, absolute_expiry, payment_id, Retry::Attempts(0), None
+ )
+ .unwrap()
+ .clear_paths()
+ .build().unwrap();
+ assert_eq!(refund.payer_id(), bob_id);
+ assert!(refund.paths().is_empty());
+ expect_recent_payment!(bob, RecentPaymentDetails::AwaitingInvoice, payment_id);
+
+ let payment_context = PaymentContext::Bolt12Refund(Bolt12RefundContext {});
+ let expected_invoice = alice.node.request_refund_payment(&refund).unwrap();
+
+ let onion_message = alice.onion_messenger.next_onion_message_for_peer(bob_id).unwrap();
+ bob.onion_messenger.handle_onion_message(&alice_id, &onion_message);
+
+ let invoice = extract_invoice(bob, &onion_message);
+ assert_eq!(invoice, expected_invoice);
+
+ route_bolt12_payment(bob, &[alice], &invoice);
+ expect_recent_payment!(bob, RecentPaymentDetails::Pending, payment_id);
+
+ claim_bolt12_payment(bob, &[alice], payment_context);
+ expect_recent_payment!(bob, RecentPaymentDetails::Fulfilled, payment_id);
+}
+
+/// Fails creating an offer when a blinded path cannot be created without exposing the node's id.
+#[test]
+fn fails_creating_offer_without_blinded_paths() {
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+ let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+ create_unannounced_chan_between_nodes_with_value(&nodes, 0, 1, 10_000_000, 1_000_000_000);
+
+ match nodes[0].node.create_offer_builder("coffee".to_string()) {
+ Ok(_) => panic!("Expected error"),
+ Err(e) => assert_eq!(e, Bolt12SemanticError::MissingPaths),
+ }
+}
+
+/// Fails creating a refund when a blinded path cannot be created without exposing the node's id.
+#[test]
+fn fails_creating_refund_without_blinded_paths() {
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+ let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+ create_unannounced_chan_between_nodes_with_value(&nodes, 0, 1, 10_000_000, 1_000_000_000);
+
+ let absolute_expiry = Duration::from_secs(u64::MAX);
+ let payment_id = PaymentId([1; 32]);
+
+ match nodes[0].node.create_refund_builder(
+ "refund".to_string(), 10_000, absolute_expiry, payment_id, Retry::Attempts(0), None
+ ) {
+ Ok(_) => panic!("Expected error"),
+ Err(e) => assert_eq!(e, Bolt12SemanticError::MissingPaths),
+ }
+
+ assert!(nodes[0].node.list_recent_payments().is_empty());
+}
+
+/// Fails creating an invoice request when the offer contains an unsupported chain.
+#[test]
+fn fails_creating_invoice_request_for_unsupported_chain() {
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+ let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+ create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 10_000_000, 1_000_000_000);
+
+ let alice = &nodes[0];
+ let bob = &nodes[1];
+
+ let offer = alice.node
+ .create_offer_builder("coffee".to_string()).unwrap()
+ .clear_chains()
+ .chain(Network::Signet)
+ .build().unwrap();
+
+ let payment_id = PaymentId([1; 32]);
+ match bob.node.pay_for_offer(&offer, None, None, None, payment_id, Retry::Attempts(0), None) {
+ Ok(_) => panic!("Expected error"),
+ Err(e) => assert_eq!(e, Bolt12SemanticError::UnsupportedChain),
+ }
+}
+
+/// Fails requesting a payment when the refund contains an unsupported chain.
+#[test]
+fn fails_sending_invoice_with_unsupported_chain_for_refund() {
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+ let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+ create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 10_000_000, 1_000_000_000);
+
+ let alice = &nodes[0];
+ let bob = &nodes[1];
+
+ let absolute_expiry = Duration::from_secs(u64::MAX);
+ let payment_id = PaymentId([1; 32]);
+ let refund = bob.node
+ .create_refund_builder(
+ "refund".to_string(), 10_000_000, absolute_expiry, payment_id, Retry::Attempts(0), None
+ )
+ .unwrap()
+ .chain(Network::Signet)
+ .build().unwrap();
+
+ match alice.node.request_refund_payment(&refund) {
+ Ok(_) => panic!("Expected error"),
+ Err(e) => assert_eq!(e, Bolt12SemanticError::UnsupportedChain),
+ }
+}
+
+/// Fails creating an invoice request when a blinded reply path cannot be created without exposing
+/// the node's id.
+#[test]
+fn fails_creating_invoice_request_without_blinded_reply_path() {
+ let chanmon_cfgs = create_chanmon_cfgs(6);
+ let node_cfgs = create_node_cfgs(6, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(6, &node_cfgs, &[None, None, None, None, None, None]);
+ let nodes = create_network(6, &node_cfgs, &node_chanmgrs);
+
+ create_unannounced_chan_between_nodes_with_value(&nodes, 0, 1, 10_000_000, 1_000_000_000);
+ create_unannounced_chan_between_nodes_with_value(&nodes, 2, 3, 10_000_000, 1_000_000_000);
+ create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 10_000_000, 1_000_000_000);
+ create_announced_chan_between_nodes_with_value(&nodes, 1, 4, 10_000_000, 1_000_000_000);
+ create_announced_chan_between_nodes_with_value(&nodes, 1, 5, 10_000_000, 1_000_000_000);
+
+ let (alice, bob, charlie, david) = (&nodes[0], &nodes[1], &nodes[2], &nodes[3]);
+
+ disconnect_peers(alice, &[charlie, david, &nodes[4], &nodes[5]]);
+ disconnect_peers(david, &[bob, &nodes[4], &nodes[5]]);
+
+ let offer = alice.node
+ .create_offer_builder("coffee".to_string()).unwrap()
+ .amount_msats(10_000_000)
+ .build().unwrap();
+
+ let payment_id = PaymentId([1; 32]);
+
+ match david.node.pay_for_offer(&offer, None, None, None, payment_id, Retry::Attempts(0), None) {
+ Ok(_) => panic!("Expected error"),
+ Err(e) => assert_eq!(e, Bolt12SemanticError::MissingPaths),
+ }
+
+ assert!(nodes[0].node.list_recent_payments().is_empty());
+}
+
+#[test]
+fn fails_creating_invoice_request_with_duplicate_payment_id() {
+ let chanmon_cfgs = create_chanmon_cfgs(6);
+ let node_cfgs = create_node_cfgs(6, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(6, &node_cfgs, &[None, None, None, None, None, None]);
+ let nodes = create_network(6, &node_cfgs, &node_chanmgrs);
+
+ create_unannounced_chan_between_nodes_with_value(&nodes, 0, 1, 10_000_000, 1_000_000_000);
+ create_unannounced_chan_between_nodes_with_value(&nodes, 2, 3, 10_000_000, 1_000_000_000);
+ create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 10_000_000, 1_000_000_000);
+ create_announced_chan_between_nodes_with_value(&nodes, 1, 4, 10_000_000, 1_000_000_000);
+ create_announced_chan_between_nodes_with_value(&nodes, 1, 5, 10_000_000, 1_000_000_000);
+ create_announced_chan_between_nodes_with_value(&nodes, 2, 4, 10_000_000, 1_000_000_000);
+ create_announced_chan_between_nodes_with_value(&nodes, 2, 5, 10_000_000, 1_000_000_000);
+
+ let (alice, _bob, charlie, david) = (&nodes[0], &nodes[1], &nodes[2], &nodes[3]);
+
+ disconnect_peers(alice, &[charlie, david, &nodes[4], &nodes[5]]);
+
+ let offer = alice.node
+ .create_offer_builder("coffee".to_string()).unwrap()
+ .amount_msats(10_000_000)
+ .build().unwrap();
+
+ let payment_id = PaymentId([1; 32]);
+ assert!(
+ david.node.pay_for_offer(
+ &offer, None, None, None, payment_id, Retry::Attempts(0), None
+ ).is_ok()
+ );
+ expect_recent_payment!(david, RecentPaymentDetails::AwaitingInvoice, payment_id);
+
+ match david.node.pay_for_offer(&offer, None, None, None, payment_id, Retry::Attempts(0), None) {
+ Ok(_) => panic!("Expected error"),
+ Err(e) => assert_eq!(e, Bolt12SemanticError::DuplicatePaymentId),
+ }
+
+ expect_recent_payment!(david, RecentPaymentDetails::AwaitingInvoice, payment_id);
+}
+
+#[test]
+fn fails_creating_refund_with_duplicate_payment_id() {
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+ let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+ create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 10_000_000, 1_000_000_000);
+
+ let absolute_expiry = Duration::from_secs(u64::MAX);
+ let payment_id = PaymentId([1; 32]);
+ assert!(
+ nodes[0].node.create_refund_builder(
+ "refund".to_string(), 10_000, absolute_expiry, payment_id, Retry::Attempts(0), None
+ ).is_ok()
+ );
+ expect_recent_payment!(nodes[0], RecentPaymentDetails::AwaitingInvoice, payment_id);
+
+ match nodes[0].node.create_refund_builder(
+ "refund".to_string(), 10_000, absolute_expiry, payment_id, Retry::Attempts(0), None
+ ) {
+ Ok(_) => panic!("Expected error"),
+ Err(e) => assert_eq!(e, Bolt12SemanticError::DuplicatePaymentId),
+ }
+
+ expect_recent_payment!(nodes[0], RecentPaymentDetails::AwaitingInvoice, payment_id);
+}
+
+#[test]
+fn fails_sending_invoice_without_blinded_payment_paths_for_offer() {
+ let mut accept_forward_cfg = test_default_channel_config();
+ accept_forward_cfg.accept_forwards_to_priv_channels = true;
+
+ // Clearing route_blinding prevents forming any payment paths since the node is unannounced.
+ let mut features = channelmanager::provided_init_features(&accept_forward_cfg);
+ features.set_onion_messages_optional();
+ features.clear_route_blinding();
+
+ let chanmon_cfgs = create_chanmon_cfgs(6);
+ let node_cfgs = create_node_cfgs(6, &chanmon_cfgs);
+
+ *node_cfgs[1].override_init_features.borrow_mut() = Some(features);
+
+ let node_chanmgrs = create_node_chanmgrs(
+ 6, &node_cfgs, &[None, Some(accept_forward_cfg), None, None, None, None]
+ );
+ let nodes = create_network(6, &node_cfgs, &node_chanmgrs);
+
+ create_unannounced_chan_between_nodes_with_value(&nodes, 0, 1, 10_000_000, 1_000_000_000);
+ create_unannounced_chan_between_nodes_with_value(&nodes, 2, 3, 10_000_000, 1_000_000_000);
+ create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 10_000_000, 1_000_000_000);
+ create_announced_chan_between_nodes_with_value(&nodes, 1, 4, 10_000_000, 1_000_000_000);
+ create_announced_chan_between_nodes_with_value(&nodes, 1, 5, 10_000_000, 1_000_000_000);
+ create_announced_chan_between_nodes_with_value(&nodes, 2, 4, 10_000_000, 1_000_000_000);
+ create_announced_chan_between_nodes_with_value(&nodes, 2, 5, 10_000_000, 1_000_000_000);
+
+ let (alice, bob, charlie, david) = (&nodes[0], &nodes[1], &nodes[2], &nodes[3]);
+ let alice_id = alice.node.get_our_node_id();
+ let bob_id = bob.node.get_our_node_id();
+ let charlie_id = charlie.node.get_our_node_id();
+ let david_id = david.node.get_our_node_id();
+
+ disconnect_peers(alice, &[charlie, david, &nodes[4], &nodes[5]]);
+ disconnect_peers(david, &[bob, &nodes[4], &nodes[5]]);
+
+ let offer = alice.node
+ .create_offer_builder("coffee".to_string()).unwrap()
+ .amount_msats(10_000_000)
+ .build().unwrap();
+
+ let payment_id = PaymentId([1; 32]);
+ david.node.pay_for_offer(&offer, None, None, None, payment_id, Retry::Attempts(0), None)
+ .unwrap();
+
+ connect_peers(david, bob);
+
+ let onion_message = david.onion_messenger.next_onion_message_for_peer(bob_id).unwrap();
+ bob.onion_messenger.handle_onion_message(&david_id, &onion_message);
+
+ connect_peers(alice, charlie);
+
+ let onion_message = bob.onion_messenger.next_onion_message_for_peer(alice_id).unwrap();
+ alice.onion_messenger.handle_onion_message(&bob_id, &onion_message);
+
+ let onion_message = alice.onion_messenger.next_onion_message_for_peer(charlie_id).unwrap();
+ charlie.onion_messenger.handle_onion_message(&alice_id, &onion_message);
+
+ let onion_message = charlie.onion_messenger.next_onion_message_for_peer(david_id).unwrap();
+ david.onion_messenger.handle_onion_message(&charlie_id, &onion_message);
+
+ let invoice_error = extract_invoice_error(david, &onion_message);
+ assert_eq!(invoice_error, InvoiceError::from(Bolt12SemanticError::MissingPaths));
+}
+
+#[test]
+fn fails_sending_invoice_without_blinded_payment_paths_for_refund() {
+ let mut accept_forward_cfg = test_default_channel_config();
+ accept_forward_cfg.accept_forwards_to_priv_channels = true;
+
+ // Clearing route_blinding prevents forming any payment paths since the node is unannounced.
+ let mut features = channelmanager::provided_init_features(&accept_forward_cfg);
+ features.set_onion_messages_optional();
+ features.clear_route_blinding();
+
+ let chanmon_cfgs = create_chanmon_cfgs(6);
+ let node_cfgs = create_node_cfgs(6, &chanmon_cfgs);
+
+ *node_cfgs[1].override_init_features.borrow_mut() = Some(features);
+
+ let node_chanmgrs = create_node_chanmgrs(
+ 6, &node_cfgs, &[None, Some(accept_forward_cfg), None, None, None, None]
+ );
+ let nodes = create_network(6, &node_cfgs, &node_chanmgrs);
+
+ create_unannounced_chan_between_nodes_with_value(&nodes, 0, 1, 10_000_000, 1_000_000_000);
+ create_unannounced_chan_between_nodes_with_value(&nodes, 2, 3, 10_000_000, 1_000_000_000);
+ create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 10_000_000, 1_000_000_000);
+ create_announced_chan_between_nodes_with_value(&nodes, 1, 4, 10_000_000, 1_000_000_000);
+ create_announced_chan_between_nodes_with_value(&nodes, 1, 5, 10_000_000, 1_000_000_000);
+ create_announced_chan_between_nodes_with_value(&nodes, 2, 4, 10_000_000, 1_000_000_000);
+ create_announced_chan_between_nodes_with_value(&nodes, 2, 5, 10_000_000, 1_000_000_000);
+
+ let (alice, bob, charlie, david) = (&nodes[0], &nodes[1], &nodes[2], &nodes[3]);
+
+ disconnect_peers(alice, &[charlie, david, &nodes[4], &nodes[5]]);
+ disconnect_peers(david, &[bob, &nodes[4], &nodes[5]]);
+
+ let absolute_expiry = Duration::from_secs(u64::MAX);
+ let payment_id = PaymentId([1; 32]);
+ let refund = david.node
+ .create_refund_builder(
+ "refund".to_string(), 10_000_000, absolute_expiry, payment_id, Retry::Attempts(0), None
+ )
+ .unwrap()
+ .build().unwrap();
+
+ match alice.node.request_refund_payment(&refund) {
+ Ok(_) => panic!("Expected error"),
+ Err(e) => assert_eq!(e, Bolt12SemanticError::MissingPaths),
+ }
+}
+
+#[test]
+fn fails_paying_invoice_more_than_once() {
+ let mut accept_forward_cfg = test_default_channel_config();
+ accept_forward_cfg.accept_forwards_to_priv_channels = true;
+
+ let mut features = channelmanager::provided_init_features(&accept_forward_cfg);
+ features.set_onion_messages_optional();
+ features.set_route_blinding_optional();
+
+ let chanmon_cfgs = create_chanmon_cfgs(6);
+ let node_cfgs = create_node_cfgs(6, &chanmon_cfgs);
+
+ *node_cfgs[1].override_init_features.borrow_mut() = Some(features);
+
+ let node_chanmgrs = create_node_chanmgrs(
+ 6, &node_cfgs, &[None, Some(accept_forward_cfg), None, None, None, None]
+ );
+ let nodes = create_network(6, &node_cfgs, &node_chanmgrs);
+
+ create_unannounced_chan_between_nodes_with_value(&nodes, 0, 1, 10_000_000, 1_000_000_000);
+ create_unannounced_chan_between_nodes_with_value(&nodes, 2, 3, 10_000_000, 1_000_000_000);
+ create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 10_000_000, 1_000_000_000);
+ create_announced_chan_between_nodes_with_value(&nodes, 1, 4, 10_000_000, 1_000_000_000);
+ create_announced_chan_between_nodes_with_value(&nodes, 1, 5, 10_000_000, 1_000_000_000);
+ create_announced_chan_between_nodes_with_value(&nodes, 2, 4, 10_000_000, 1_000_000_000);
+ create_announced_chan_between_nodes_with_value(&nodes, 2, 5, 10_000_000, 1_000_000_000);
+
+ let (alice, bob, charlie, david) = (&nodes[0], &nodes[1], &nodes[2], &nodes[3]);
+ let alice_id = alice.node.get_our_node_id();
+ let bob_id = bob.node.get_our_node_id();
+ let charlie_id = charlie.node.get_our_node_id();
+ let david_id = david.node.get_our_node_id();
+
+ disconnect_peers(alice, &[charlie, david, &nodes[4], &nodes[5]]);
+ disconnect_peers(david, &[bob, &nodes[4], &nodes[5]]);
+
+ let absolute_expiry = Duration::from_secs(u64::MAX);
+ let payment_id = PaymentId([1; 32]);
+ let refund = david.node
+ .create_refund_builder(
+ "refund".to_string(), 10_000_000, absolute_expiry, payment_id, Retry::Attempts(0), None
+ )
+ .unwrap()
+ .build().unwrap();
+ expect_recent_payment!(david, RecentPaymentDetails::AwaitingInvoice, payment_id);
+
+ // Alice sends the first invoice
+ alice.node.request_refund_payment(&refund).unwrap();
+
+ connect_peers(alice, charlie);
+
+ let onion_message = alice.onion_messenger.next_onion_message_for_peer(charlie_id).unwrap();
+ charlie.onion_messenger.handle_onion_message(&alice_id, &onion_message);
+
+ let onion_message = charlie.onion_messenger.next_onion_message_for_peer(david_id).unwrap();
+ david.onion_messenger.handle_onion_message(&charlie_id, &onion_message);
+
+ // David pays the first invoice
+ let payment_context = PaymentContext::Bolt12Refund(Bolt12RefundContext {});
+ let invoice1 = extract_invoice(david, &onion_message);
+
+ route_bolt12_payment(david, &[charlie, bob, alice], &invoice1);
+ expect_recent_payment!(david, RecentPaymentDetails::Pending, payment_id);
+
+ claim_bolt12_payment(david, &[charlie, bob, alice], payment_context);
+ expect_recent_payment!(david, RecentPaymentDetails::Fulfilled, payment_id);
+
+ disconnect_peers(alice, &[charlie]);
+
+ // Alice sends the second invoice
+ alice.node.request_refund_payment(&refund).unwrap();
+
+ connect_peers(alice, charlie);
+ connect_peers(david, bob);
+
+ let onion_message = alice.onion_messenger.next_onion_message_for_peer(charlie_id).unwrap();
+ charlie.onion_messenger.handle_onion_message(&alice_id, &onion_message);
+
+ let onion_message = charlie.onion_messenger.next_onion_message_for_peer(david_id).unwrap();
+ david.onion_messenger.handle_onion_message(&charlie_id, &onion_message);
+
+ let invoice2 = extract_invoice(david, &onion_message);
+ assert_eq!(invoice1.payer_metadata(), invoice2.payer_metadata());
+
+ // David sends an error instead of paying the second invoice
+ let onion_message = david.onion_messenger.next_onion_message_for_peer(bob_id).unwrap();
+ bob.onion_messenger.handle_onion_message(&david_id, &onion_message);
+
+ let onion_message = bob.onion_messenger.next_onion_message_for_peer(alice_id).unwrap();
+ alice.onion_messenger.handle_onion_message(&bob_id, &onion_message);
+
+ let invoice_error = extract_invoice_error(alice, &onion_message);
+ assert_eq!(invoice_error, InvoiceError::from_string("DuplicateInvoice".to_string()));
+}
use crate::sign::{NodeSigner, Recipient};
use crate::util::logger::Logger;
+#[allow(unused_imports)]
use crate::prelude::*;
+
use core::ops::Deref;
/// Invalid inbound onion payment.
) -> Result<PendingHTLCInfo, InboundHTLCErr> {
let (
payment_data, keysend_preimage, custom_tlvs, onion_amt_msat, onion_cltv_expiry,
- payment_metadata, requires_blinded_error
+ payment_metadata, payment_context, requires_blinded_error
) = match hop_data {
msgs::InboundOnionPayload::Receive {
payment_data, keysend_preimage, custom_tlvs, sender_intended_htlc_amt_msat,
cltv_expiry_height, payment_metadata, ..
} =>
(payment_data, keysend_preimage, custom_tlvs, sender_intended_htlc_amt_msat,
- cltv_expiry_height, payment_metadata, false),
+ cltv_expiry_height, payment_metadata, None, false),
msgs::InboundOnionPayload::BlindedReceive {
sender_intended_htlc_amt_msat, total_msat, cltv_expiry_height, payment_secret,
- intro_node_blinding_point, payment_constraints, ..
+ intro_node_blinding_point, payment_constraints, payment_context, keysend_preimage,
+ custom_tlvs
} => {
check_blinded_payment_constraints(
sender_intended_htlc_amt_msat, cltv_expiry, &payment_constraints
}
})?;
let payment_data = msgs::FinalOnionHopData { payment_secret, total_msat };
- (Some(payment_data), None, Vec::new(), sender_intended_htlc_amt_msat, cltv_expiry_height,
- None, intro_node_blinding_point.is_none())
+ (Some(payment_data), keysend_preimage, custom_tlvs,
+ sender_intended_htlc_amt_msat, cltv_expiry_height, None, Some(payment_context),
+ intro_node_blinding_point.is_none())
}
msgs::InboundOnionPayload::Forward { .. } => {
return Err(InboundHTLCErr {
payment_metadata,
incoming_cltv_expiry: onion_cltv_expiry,
custom_tlvs,
+ requires_blinded_error,
}
} else if let Some(data) = payment_data {
PendingHTLCRouting::Receive {
payment_data: data,
payment_metadata,
+ payment_context,
incoming_cltv_expiry: onion_cltv_expiry,
phantom_shared_secret,
custom_tlvs,
use crate::routing::router::{get_route, PaymentParameters, Route, RouteParameters, RouteHint, RouteHintHop};
use crate::ln::features::{InitFeatures, Bolt11InvoiceFeatures};
use crate::ln::msgs;
-use crate::ln::msgs::{ChannelMessageHandler, ChannelUpdate};
+use crate::ln::msgs::{ChannelMessageHandler, ChannelUpdate, OutboundTrampolinePayload};
use crate::ln::wire::Encode;
use crate::util::ser::{Writeable, Writer, BigSize};
use crate::util::test_utils;
use bitcoin::hashes::sha256::Hash as Sha256;
use bitcoin::secp256k1;
-use bitcoin::secp256k1::{Secp256k1, SecretKey};
+use bitcoin::secp256k1::{PublicKey, Secp256k1, SecretKey};
use crate::io;
use crate::prelude::*;
-use core::default::Default;
+use bitcoin::hashes::hex::FromHex;
use crate::ln::functional_test_utils::*;
}
}
+#[test]
+fn test_trampoline_onion_payload_serialization() {
+ // As per https://github.com/lightning/bolts/blob/c01d2e6267d4a8d1095f0f1188970055a9a22d29/bolt04/trampoline-payment-onion-test.json#L3
+ let trampoline_payload = OutboundTrampolinePayload::Forward {
+ amt_to_forward: 100000000,
+ outgoing_cltv_value: 800000,
+ outgoing_node_id: PublicKey::from_slice(&<Vec<u8>>::from_hex("02edabbd16b41c8371b92ef2f04c1185b4f03b6dcd52ba9b78d9d7c89c8f221145").unwrap()).unwrap(),
+ };
+
+ let slice_to_hex = |slice: &[u8]| {
+ slice.iter()
+ .map(|b| format!("{:02x}", b).to_string())
+ .collect::<String>()
+ };
+
+ let carol_payload_hex = slice_to_hex(&trampoline_payload.encode());
+ assert_eq!(carol_payload_hex, "2e020405f5e10004030c35000e2102edabbd16b41c8371b92ef2f04c1185b4f03b6dcd52ba9b78d9d7c89c8f221145");
+}
+
fn do_test_fail_htlc_backwards_with_reason(failure_code: FailureCode) {
let chanmon_cfgs = create_chanmon_cfgs(2);
// Ensure the payment fails with the expected error.
let mut error_data = recv_value_msat.to_be_bytes().to_vec();
error_data.extend_from_slice(
- &nodes[0].node.best_block.read().unwrap().height().to_be_bytes(),
+ &nodes[0].node.best_block.read().unwrap().height.to_be_bytes(),
);
let mut fail_conditions = PaymentFailedConditions::new()
.blamed_scid(phantom_scid)
// Ensure the payment fails with the expected error.
let mut error_data = bad_recv_amt_msat.to_be_bytes().to_vec();
- error_data.extend_from_slice(&nodes[1].node.best_block.read().unwrap().height().to_be_bytes());
+ error_data.extend_from_slice(&nodes[1].node.best_block.read().unwrap().height.to_be_bytes());
let mut fail_conditions = PaymentFailedConditions::new()
.blamed_scid(phantom_scid)
.expected_htlc_error_data(0x4000 | 15, &error_data);
// Ensure the payment fails with the expected error.
let mut error_data = recv_amt_msat.to_be_bytes().to_vec();
- error_data.extend_from_slice(&nodes[1].node.best_block.read().unwrap().height().to_be_bytes());
+ error_data.extend_from_slice(&nodes[1].node.best_block.read().unwrap().height.to_be_bytes());
let mut fail_conditions = PaymentFailedConditions::new()
.blamed_scid(phantom_scid)
.expected_htlc_error_data(0x4000 | 15, &error_data);
// You may not use this file except in accordance with one or both of these
// licenses.
-use crate::ln::{PaymentHash, PaymentPreimage};
+use crate::crypto::chacha20::ChaCha20;
+use crate::crypto::streams::ChaChaReader;
use crate::ln::channelmanager::{HTLCSource, RecipientOnionFields};
use crate::ln::msgs;
use crate::ln::wire::Encode;
+use crate::ln::{PaymentHash, PaymentPreimage};
use crate::routing::gossip::NetworkUpdate;
use crate::routing::router::{BlindedTail, Path, RouteHop};
use crate::sign::NodeSigner;
-use crate::crypto::chacha20::ChaCha20;
-use crate::crypto::streams::ChaChaReader;
use crate::util::errors::{self, APIError};
-use crate::util::ser::{Readable, ReadableArgs, Writeable, Writer, LengthCalculatingWriter};
use crate::util::logger::Logger;
+use crate::util::ser::{LengthCalculatingWriter, Readable, ReadableArgs, Writeable, Writer};
-use bitcoin::hashes::{Hash, HashEngine};
use bitcoin::hashes::cmp::fixed_time_eq;
use bitcoin::hashes::hmac::{Hmac, HmacEngine};
use bitcoin::hashes::sha256::Hash as Sha256;
+use bitcoin::hashes::{Hash, HashEngine};
-use bitcoin::secp256k1::{SecretKey, PublicKey, Scalar};
-use bitcoin::secp256k1::Secp256k1;
-use bitcoin::secp256k1::ecdh::SharedSecret;
use bitcoin::secp256k1;
+use bitcoin::secp256k1::ecdh::SharedSecret;
+use bitcoin::secp256k1::{PublicKey, Scalar, Secp256k1, SecretKey};
-use crate::prelude::*;
use crate::io::{Cursor, Read};
-use core::convert::{AsMut, TryInto};
use core::ops::Deref;
+#[allow(unused_imports)]
+use crate::prelude::*;
+
pub(crate) struct OnionKeys {
#[cfg(test)]
pub(crate) shared_secret: SharedSecret,
#[inline]
pub(crate) fn gen_rho_mu_from_shared_secret(shared_secret: &[u8]) -> ([u8; 32], [u8; 32]) {
assert_eq!(shared_secret.len(), 32);
- ({
- let mut hmac = HmacEngine::<Sha256>::new(&[0x72, 0x68, 0x6f]); // rho
- hmac.input(&shared_secret);
- Hmac::from_engine(hmac).to_byte_array()
- },
- {
- let mut hmac = HmacEngine::<Sha256>::new(&[0x6d, 0x75]); // mu
- hmac.input(&shared_secret);
- Hmac::from_engine(hmac).to_byte_array()
- })
+ let mut engine_rho = HmacEngine::<Sha256>::new(b"rho");
+ engine_rho.input(&shared_secret);
+ let hmac_rho = Hmac::from_engine(engine_rho).to_byte_array();
+
+ let mut engine_mu = HmacEngine::<Sha256>::new(b"mu");
+ engine_mu.input(&shared_secret);
+ let hmac_mu = Hmac::from_engine(engine_mu).to_byte_array();
+
+ (hmac_rho, hmac_mu)
}
#[inline]
/// Calculates a pubkey for the next hop, such as the next hop's packet pubkey or blinding point.
pub(crate) fn next_hop_pubkey<T: secp256k1::Verification>(
- secp_ctx: &Secp256k1<T>, curr_pubkey: PublicKey, shared_secret: &[u8]
+ secp_ctx: &Secp256k1<T>, curr_pubkey: PublicKey, shared_secret: &[u8],
) -> Result<PublicKey, secp256k1::Error> {
let blinding_factor = {
let mut sha = Sha256::engine();
// can only fail if an intermediary hop has an invalid public key or session_priv is invalid
#[inline]
pub(super) fn construct_onion_keys_callback<T, FType>(
- secp_ctx: &Secp256k1<T>, path: &Path, session_priv: &SecretKey, mut callback: FType
+ secp_ctx: &Secp256k1<T>, path: &Path, session_priv: &SecretKey, mut callback: FType,
) -> Result<(), secp256k1::Error>
where
T: secp256k1::Signing,
- FType: FnMut(SharedSecret, [u8; 32], PublicKey, Option<&RouteHop>, usize)
+ FType: FnMut(SharedSecret, [u8; 32], PublicKey, Option<&RouteHop>, usize),
{
let mut blinded_priv = session_priv.clone();
let mut blinded_pub = PublicKey::from_secret_key(secp_ctx, &blinded_priv);
let unblinded_hops_iter = path.hops.iter().map(|h| (&h.pubkey, Some(h)));
- let blinded_pks_iter = path.blinded_tail.as_ref()
- .map(|t| t.hops.iter()).unwrap_or([].iter())
+ let blinded_pks_iter = path
+ .blinded_tail
+ .as_ref()
+ .map(|t| t.hops.iter())
+ .unwrap_or([].iter())
.skip(1) // Skip the intro node because it's included in the unblinded hops
.map(|h| (&h.blinded_node_id, None));
for (idx, (pubkey, route_hop_opt)) in unblinded_hops_iter.chain(blinded_pks_iter).enumerate() {
}
// can only fail if an intermediary hop has an invalid public key or session_priv is invalid
-pub(super) fn construct_onion_keys<T: secp256k1::Signing>(secp_ctx: &Secp256k1<T>, path: &Path, session_priv: &SecretKey) -> Result<Vec<OnionKeys>, secp256k1::Error> {
+pub(super) fn construct_onion_keys<T: secp256k1::Signing>(
+ secp_ctx: &Secp256k1<T>, path: &Path, session_priv: &SecretKey,
+) -> Result<Vec<OnionKeys>, secp256k1::Error> {
let mut res = Vec::with_capacity(path.hops.len());
- construct_onion_keys_callback(secp_ctx, &path, session_priv,
- |shared_secret, _blinding_factor, ephemeral_pubkey, _, _|
- {
- let (rho, mu) = gen_rho_mu_from_shared_secret(shared_secret.as_ref());
+ construct_onion_keys_callback(
+ secp_ctx,
+ &path,
+ session_priv,
+ |shared_secret, _blinding_factor, ephemeral_pubkey, _, _| {
+ let (rho, mu) = gen_rho_mu_from_shared_secret(shared_secret.as_ref());
- res.push(OnionKeys {
- #[cfg(test)]
- shared_secret,
- #[cfg(test)]
- blinding_factor: _blinding_factor,
- ephemeral_pubkey,
- rho,
- mu,
- });
- })?;
+ res.push(OnionKeys {
+ #[cfg(test)]
+ shared_secret,
+ #[cfg(test)]
+ blinding_factor: _blinding_factor,
+ ephemeral_pubkey,
+ rho,
+ mu,
+ });
+ },
+ )?;
Ok(res)
}
/// returns the hop data, as well as the first-hop value_msat and CLTV value we should send.
-pub(super) fn build_onion_payloads(path: &Path, total_msat: u64, mut recipient_onion: RecipientOnionFields, starting_htlc_offset: u32, keysend_preimage: &Option<PaymentPreimage>) -> Result<(Vec<msgs::OutboundOnionPayload>, u64, u32), APIError> {
+pub(super) fn build_onion_payloads(
+ path: &Path, total_msat: u64, mut recipient_onion: RecipientOnionFields,
+ starting_htlc_offset: u32, keysend_preimage: &Option<PaymentPreimage>,
+) -> Result<(Vec<msgs::OutboundOnionPayload>, u64, u32), APIError> {
let mut cur_value_msat = 0u64;
let mut cur_cltv = starting_htlc_offset;
let mut last_short_channel_id = 0;
let mut res: Vec<msgs::OutboundOnionPayload> = Vec::with_capacity(
- path.hops.len() + path.blinded_tail.as_ref().map_or(0, |t| t.hops.len())
+ path.hops.len() + path.blinded_tail.as_ref().map_or(0, |t| t.hops.len()),
);
for (idx, hop) in path.hops.iter().rev().enumerate() {
// exactly as it should be (and the next hop isn't trying to probe to find out if we're
// the intended recipient).
let value_msat = if cur_value_msat == 0 { hop.fee_msat } else { cur_value_msat };
- let cltv = if cur_cltv == starting_htlc_offset { hop.cltv_expiry_delta + starting_htlc_offset } else { cur_cltv };
+ let cltv = if cur_cltv == starting_htlc_offset {
+ hop.cltv_expiry_delta + starting_htlc_offset
+ } else {
+ cur_cltv
+ };
if idx == 0 {
if let Some(BlindedTail {
- blinding_point, hops, final_value_msat, excess_final_cltv_expiry_delta, ..
- }) = &path.blinded_tail {
+ blinding_point,
+ hops,
+ final_value_msat,
+ excess_final_cltv_expiry_delta,
+ ..
+ }) = &path.blinded_tail
+ {
let mut blinding_point = Some(*blinding_point);
for (i, blinded_hop) in hops.iter().enumerate() {
if i == hops.len() - 1 {
cltv_expiry_height: cur_cltv + excess_final_cltv_expiry_delta,
encrypted_tlvs: blinded_hop.encrypted_payload.clone(),
intro_node_blinding_point: blinding_point.take(),
+ keysend_preimage: *keysend_preimage,
+ custom_tlvs: recipient_onion.custom_tlvs.clone(),
});
} else {
res.push(msgs::OutboundOnionPayload::BlindedForward {
} else {
res.push(msgs::OutboundOnionPayload::Receive {
payment_data: if let Some(secret) = recipient_onion.payment_secret.take() {
- Some(msgs::FinalOnionHopData {
- payment_secret: secret,
- total_msat,
- })
- } else { None },
+ Some(msgs::FinalOnionHopData { payment_secret: secret, total_msat })
+ } else {
+ None
+ },
payment_metadata: recipient_onion.payment_metadata.take(),
keysend_preimage: *keysend_preimage,
custom_tlvs: recipient_onion.custom_tlvs.clone(),
});
}
} else {
- res.insert(0, msgs::OutboundOnionPayload::Forward {
+ let payload = msgs::OutboundOnionPayload::Forward {
short_channel_id: last_short_channel_id,
amt_to_forward: value_msat,
outgoing_cltv_value: cltv,
- });
+ };
+ res.insert(0, payload);
}
cur_value_msat += hop.fee_msat;
if cur_value_msat >= 21000000 * 100000000 * 1000 {
- return Err(APIError::InvalidRoute{err: "Channel fees overflowed?".to_owned()});
+ return Err(APIError::InvalidRoute { err: "Channel fees overflowed?".to_owned() });
}
cur_cltv += hop.cltv_expiry_delta as u32;
if cur_cltv >= 500000000 {
- return Err(APIError::InvalidRoute{err: "Channel CLTV overflowed?".to_owned()});
+ return Err(APIError::InvalidRoute { err: "Channel CLTV overflowed?".to_owned() });
}
last_short_channel_id = hop.short_channel_id;
}
/// Length of the onion data packet. Before TLV-based onions this was 20 65-byte hops, though now
/// the hops can be of variable length.
-pub(crate) const ONION_DATA_LEN: usize = 20*65;
+pub(crate) const ONION_DATA_LEN: usize = 20 * 65;
pub(super) const INVALID_ONION_BLINDING: u16 = 0x8000 | 0x4000 | 24;
#[inline]
fn shift_slice_right(arr: &mut [u8], amt: usize) {
for i in (amt..arr.len()).rev() {
- arr[i] = arr[i-amt];
+ arr[i] = arr[i - amt];
}
for i in 0..amt {
arr[i] = 0;
pub(super) fn construct_onion_packet(
payloads: Vec<msgs::OutboundOnionPayload>, onion_keys: Vec<OnionKeys>, prng_seed: [u8; 32],
- associated_data: &PaymentHash
+ associated_data: &PaymentHash,
) -> Result<msgs::OnionPacket, ()> {
let mut packet_data = [0; ONION_DATA_LEN];
let mut chacha = ChaCha20::new(&prng_seed, &[0; 8]);
chacha.process(&[0; ONION_DATA_LEN], &mut packet_data);
+ let packet = FixedSizeOnionPacket(packet_data);
+ construct_onion_packet_with_init_noise::<_, _>(
+ payloads,
+ onion_keys,
+ packet,
+ Some(associated_data),
+ )
+}
+
+#[allow(unused)]
+pub(super) fn construct_trampoline_onion_packet(
+ payloads: Vec<msgs::OutboundTrampolinePayload>, onion_keys: Vec<OnionKeys>,
+ prng_seed: [u8; 32], associated_data: &PaymentHash, length: u16,
+) -> Result<msgs::TrampolineOnionPacket, ()> {
+ let mut packet_data = vec![0u8; length as usize];
+
+ let mut chacha = ChaCha20::new(&prng_seed, &[0; 8]);
+ chacha.process(&vec![0u8; length as usize], &mut packet_data);
+
construct_onion_packet_with_init_noise::<_, _>(
- payloads, onion_keys, FixedSizeOnionPacket(packet_data), Some(associated_data))
+ payloads,
+ onion_keys,
+ packet_data,
+ Some(associated_data),
+ )
}
#[cfg(test)]
/// Used in testing to write bogus `BogusOnionHopData` as well as `RawOnionHopData`, which is
/// otherwise not representable in `msgs::OnionHopData`.
-pub(super) fn construct_onion_packet_with_writable_hopdata<HD: Writeable>(payloads: Vec<HD>, onion_keys: Vec<OnionKeys>, prng_seed: [u8; 32], associated_data: &PaymentHash) -> Result<msgs::OnionPacket, ()> {
+pub(super) fn construct_onion_packet_with_writable_hopdata<HD: Writeable>(
+ payloads: Vec<HD>, onion_keys: Vec<OnionKeys>, prng_seed: [u8; 32],
+ associated_data: &PaymentHash,
+) -> Result<msgs::OnionPacket, ()> {
let mut packet_data = [0; ONION_DATA_LEN];
let mut chacha = ChaCha20::new(&prng_seed, &[0; 8]);
chacha.process(&[0; ONION_DATA_LEN], &mut packet_data);
+ let packet = FixedSizeOnionPacket(packet_data);
construct_onion_packet_with_init_noise::<_, _>(
- payloads, onion_keys, FixedSizeOnionPacket(packet_data), Some(associated_data))
+ payloads,
+ onion_keys,
+ packet,
+ Some(associated_data),
+ )
}
/// Since onion message packets and onion payment packets have different lengths but are otherwise
}
pub(crate) fn construct_onion_message_packet<HD: Writeable, P: Packet<Data = Vec<u8>>>(
- payloads: Vec<HD>, onion_keys: Vec<OnionKeys>, prng_seed: [u8; 32], packet_data_len: usize) -> Result<P, ()>
-{
+ payloads: Vec<HD>, onion_keys: Vec<OnionKeys>, prng_seed: [u8; 32], packet_data_len: usize,
+) -> Result<P, ()> {
let mut packet_data = vec![0; packet_data_len];
let mut chacha = ChaCha20::new(&prng_seed, &[0; 8]);
}
fn construct_onion_packet_with_init_noise<HD: Writeable, P: Packet>(
- mut payloads: Vec<HD>, onion_keys: Vec<OnionKeys>, mut packet_data: P::Data, associated_data: Option<&PaymentHash>) -> Result<P, ()>
-{
+ mut payloads: Vec<HD>, onion_keys: Vec<OnionKeys>, mut packet_data: P::Data,
+ associated_data: Option<&PaymentHash>,
+) -> Result<P, ()> {
let filler = {
let packet_data = packet_data.as_mut();
const ONION_HOP_DATA_LEN: usize = 65; // We may decrease this eventually after TLV is common
let mut pos = 0;
for (i, (payload, keys)) in payloads.iter().zip(onion_keys.iter()).enumerate() {
let mut chacha = ChaCha20::new(&keys.rho, &[0u8; 8]);
- for _ in 0..(packet_data.len() - pos) { // TODO: Batch this.
+ // TODO: Batch this.
+ for _ in 0..(packet_data.len() - pos) {
let mut dummy = [0; 1];
chacha.process_in_place(&mut dummy); // We don't have a seek function :(
}
return Err(());
}
- if i == payloads.len() - 1 { break; }
+ if i == payloads.len() - 1 {
+ break;
+ }
res.resize(pos, 0u8);
chacha.process_in_place(&mut res);
/// Encrypts a failure packet. raw_packet can either be a
/// msgs::DecodedOnionErrorPacket.encode() result or a msgs::OnionErrorPacket.data element.
-pub(super) fn encrypt_failure_packet(shared_secret: &[u8], raw_packet: &[u8]) -> msgs::OnionErrorPacket {
+pub(super) fn encrypt_failure_packet(
+ shared_secret: &[u8], raw_packet: &[u8],
+) -> msgs::OnionErrorPacket {
let ammag = gen_ammag_from_shared_secret(&shared_secret);
let mut packet_crypted = Vec::with_capacity(raw_packet.len());
packet_crypted.resize(raw_packet.len(), 0);
let mut chacha = ChaCha20::new(&ammag, &[0u8; 8]);
chacha.process(&raw_packet, &mut packet_crypted[..]);
- msgs::OnionErrorPacket {
- data: packet_crypted,
- }
+ msgs::OnionErrorPacket { data: packet_crypted }
}
-pub(super) fn build_failure_packet(shared_secret: &[u8], failure_type: u16, failure_data: &[u8]) -> msgs::DecodedOnionErrorPacket {
+pub(super) fn build_failure_packet(
+ shared_secret: &[u8], failure_type: u16, failure_data: &[u8],
+) -> msgs::DecodedOnionErrorPacket {
assert_eq!(shared_secret.len(), 32);
assert!(failure_data.len() <= 256 - 2);
res.resize(256 - 2 - failure_data.len(), 0);
res
};
- let mut packet = msgs::DecodedOnionErrorPacket {
- hmac: [0; 32],
- failuremsg,
- pad,
- };
+ let mut packet = msgs::DecodedOnionErrorPacket { hmac: [0; 32], failuremsg, pad };
let mut hmac = HmacEngine::<Sha256>::new(&um);
hmac.input(&packet.encode()[32..]);
}
#[cfg(test)]
-pub(super) fn build_first_hop_failure_packet(shared_secret: &[u8], failure_type: u16, failure_data: &[u8]) -> msgs::OnionErrorPacket {
+pub(super) fn build_first_hop_failure_packet(
+ shared_secret: &[u8], failure_type: u16, failure_data: &[u8],
+) -> msgs::OnionErrorPacket {
let failure_packet = build_failure_packet(shared_secret, failure_type, failure_data);
encrypt_failure_packet(shared_secret, &failure_packet.encode()[..])
}
/// Note that we always decrypt `packet` in-place here even if the deserialization into
/// [`msgs::DecodedOnionErrorPacket`] ultimately fails.
fn decrypt_onion_error_packet(
- packet: &mut Vec<u8>, shared_secret: SharedSecret
+ packet: &mut Vec<u8>, shared_secret: SharedSecret,
) -> Result<msgs::DecodedOnionErrorPacket, msgs::DecodeError> {
let ammag = gen_ammag_from_shared_secret(shared_secret.as_ref());
let mut chacha = ChaCha20::new(&ammag, &[0u8; 8]);
/// OutboundRoute).
#[inline]
pub(super) fn process_onion_failure<T: secp256k1::Signing, L: Deref>(
- secp_ctx: &Secp256k1<T>, logger: &L, htlc_source: &HTLCSource, mut encrypted_packet: Vec<u8>
-) -> DecodedOnionFailure where L::Target: Logger {
- let (path, session_priv, first_hop_htlc_msat) = if let &HTLCSource::OutboundRoute {
- ref path, ref session_priv, ref first_hop_htlc_msat, ..
- } = htlc_source {
- (path, session_priv, first_hop_htlc_msat)
- } else { unreachable!() };
+ secp_ctx: &Secp256k1<T>, logger: &L, htlc_source: &HTLCSource, mut encrypted_packet: Vec<u8>,
+) -> DecodedOnionFailure
+where
+ L::Target: Logger,
+{
+ let (path, session_priv, first_hop_htlc_msat) = match htlc_source {
+ HTLCSource::OutboundRoute {
+ ref path, ref session_priv, ref first_hop_htlc_msat, ..
+ } => (path, session_priv, first_hop_htlc_msat),
+ _ => {
+ unreachable!()
+ },
+ };
// Learnings from the HTLC failure to inform future payment retries and scoring.
struct FailureLearnings {
const UPDATE: u16 = 0x1000;
// Handle packed channel/node updates for passing back for the route handler
- construct_onion_keys_callback(secp_ctx, &path, session_priv,
- |shared_secret, _, _, route_hop_opt, route_hop_idx|
- {
- if res.is_some() { return; }
+ let callback = |shared_secret, _, _, route_hop_opt: Option<&RouteHop>, route_hop_idx| {
+ if res.is_some() {
+ return;
+ }
let route_hop = match route_hop_opt {
Some(hop) => hop,
error_code_ret = Some(BADONION | PERM | 24); // invalid_onion_blinding
error_packet_ret = Some(vec![0; 32]);
res = Some(FailureLearnings {
- network_update: None, short_channel_id: None, payment_failed_permanently: false,
+ network_update: None,
+ short_channel_id: None,
+ payment_failed_permanently: false,
failed_within_blinded_path: true,
});
- return
+ return;
},
};
let num_blinded_hops = path.blinded_tail.as_ref().map_or(0, |bt| bt.hops.len());
// For 1-hop blinded paths, the final `path.hops` entry is the recipient.
is_from_final_node = route_hop_idx + 1 == path.hops.len() && num_blinded_hops <= 1;
- let failing_route_hop = if is_from_final_node { route_hop } else {
+ let failing_route_hop = if is_from_final_node {
+ route_hop
+ } else {
match path.hops.get(route_hop_idx + 1) {
Some(hop) => hop,
None => {
// The failing hop is within a multi-hop blinded path.
- #[cfg(not(test))] {
+ #[cfg(not(test))]
+ {
error_code_ret = Some(BADONION | PERM | 24); // invalid_onion_blinding
error_packet_ret = Some(vec![0; 32]);
}
- #[cfg(test)] {
+ #[cfg(test)]
+ {
// Actually parse the onion error data in tests so we can check that blinded hops fail
// back correctly.
- let err_packet = decrypt_onion_error_packet(
- &mut encrypted_packet, shared_secret
- ).unwrap();
- error_code_ret =
- Some(u16::from_be_bytes(err_packet.failuremsg.get(0..2).unwrap().try_into().unwrap()));
+ let err_packet =
+ decrypt_onion_error_packet(&mut encrypted_packet, shared_secret)
+ .unwrap();
+ error_code_ret = Some(u16::from_be_bytes(
+ err_packet.failuremsg.get(0..2).unwrap().try_into().unwrap(),
+ ));
error_packet_ret = Some(err_packet.failuremsg[2..].to_vec());
}
res = Some(FailureLearnings {
- network_update: None, short_channel_id: None, payment_failed_permanently: false,
+ network_update: None,
+ short_channel_id: None,
+ payment_failed_permanently: false,
failed_within_blinded_path: true,
});
- return
- }
+ return;
+ },
}
};
let err_packet = match decrypt_onion_error_packet(&mut encrypted_packet, shared_secret) {
Ok(p) => p,
- Err(_) => return
+ Err(_) => return,
};
let um = gen_um_from_shared_secret(shared_secret.as_ref());
let mut hmac = HmacEngine::<Sha256>::new(&um);
hmac.input(&err_packet.encode()[32..]);
- if !fixed_time_eq(&Hmac::from_engine(hmac).to_byte_array(), &err_packet.hmac) { return }
+ if !fixed_time_eq(&Hmac::from_engine(hmac).to_byte_array(), &err_packet.hmac) {
+ return;
+ }
let error_code_slice = match err_packet.failuremsg.get(0..2) {
Some(s) => s,
None => {
});
let short_channel_id = Some(route_hop.short_channel_id);
res = Some(FailureLearnings {
- network_update, short_channel_id, payment_failed_permanently: is_from_final_node,
- failed_within_blinded_path: false
+ network_update,
+ short_channel_id,
+ payment_failed_permanently: is_from_final_node,
+ failed_within_blinded_path: false,
});
- return
- }
+ return;
+ },
};
let error_code = u16::from_be_bytes(error_code_slice.try_into().expect("len is 2"));
// indicate that payment parameter has failed and no need to update Route object
let payment_failed = match error_code & 0xff {
- 15|16|17|18|19|23 => true,
+ 15 | 16 | 17 | 18 | 19 | 23 => true,
_ => false,
} && is_from_final_node; // PERM bit observed below even if this error is from the intermediate nodes
});
} else if error_code & NODE == NODE {
let is_permanent = error_code & PERM == PERM;
- network_update = Some(NetworkUpdate::NodeFailure { node_id: route_hop.pubkey, is_permanent });
+ network_update =
+ Some(NetworkUpdate::NodeFailure { node_id: route_hop.pubkey, is_permanent });
short_channel_id = Some(route_hop.short_channel_id);
} else if error_code & PERM == PERM {
if !payment_failed {
short_channel_id = Some(failing_route_hop.short_channel_id);
}
} else if error_code & UPDATE == UPDATE {
- if let Some(update_len_slice) = err_packet.failuremsg.get(debug_field_size+2..debug_field_size+4) {
- let update_len = u16::from_be_bytes(update_len_slice.try_into().expect("len is 2")) as usize;
- if let Some(mut update_slice) = err_packet.failuremsg.get(debug_field_size + 4..debug_field_size + 4 + update_len) {
+ if let Some(update_len_slice) =
+ err_packet.failuremsg.get(debug_field_size + 2..debug_field_size + 4)
+ {
+ let update_len =
+ u16::from_be_bytes(update_len_slice.try_into().expect("len is 2")) as usize;
+ if let Some(mut update_slice) = err_packet
+ .failuremsg
+ .get(debug_field_size + 4..debug_field_size + 4 + update_len)
+ {
// Historically, the BOLTs were unclear if the message type
// bytes should be included here or not. The BOLTs have now
// been updated to indicate that they *are* included, but many
// permissiveness introduces the (although small) possibility
// that we fail to decode legitimate channel updates that
// happen to start with ChannelUpdate::TYPE, i.e., [0x01, 0x02].
- if update_slice.len() > 2 && update_slice[0..2] == msgs::ChannelUpdate::TYPE.to_be_bytes() {
+ if update_slice.len() > 2
+ && update_slice[0..2] == msgs::ChannelUpdate::TYPE.to_be_bytes()
+ {
update_slice = &update_slice[2..];
} else {
log_trace!(logger, "Failure provided features a channel update without type prefix. Deprecated, but allowing for now.");
// MAY treat the channel_update as invalid.
let is_chan_update_invalid = match error_code & 0xff {
7 => false,
- 11 => update_opt.is_ok() &&
- amt_to_forward >
- update_opt.as_ref().unwrap().contents.htlc_minimum_msat,
- 12 => update_opt.is_ok() && amt_to_forward
- .checked_mul(update_opt.as_ref().unwrap()
- .contents.fee_proportional_millionths as u64)
- .map(|prop_fee| prop_fee / 1_000_000)
- .and_then(|prop_fee| prop_fee.checked_add(
- update_opt.as_ref().unwrap().contents.fee_base_msat as u64))
- .map(|fee_msats| route_hop.fee_msat >= fee_msats)
- .unwrap_or(false),
- 13 => update_opt.is_ok() &&
- route_hop.cltv_expiry_delta as u16 >=
- update_opt.as_ref().unwrap().contents.cltv_expiry_delta,
+ 11 => {
+ update_opt.is_ok()
+ && amt_to_forward
+ > update_opt.as_ref().unwrap().contents.htlc_minimum_msat
+ },
+ 12 => {
+ update_opt.is_ok()
+ && amt_to_forward
+ .checked_mul(
+ update_opt
+ .as_ref()
+ .unwrap()
+ .contents
+ .fee_proportional_millionths as u64,
+ )
+ .map(|prop_fee| prop_fee / 1_000_000)
+ .and_then(|prop_fee| {
+ prop_fee.checked_add(
+ update_opt.as_ref().unwrap().contents.fee_base_msat
+ as u64,
+ )
+ })
+ .map(|fee_msats| route_hop.fee_msat >= fee_msats)
+ .unwrap_or(false)
+ },
+ 13 => {
+ update_opt.is_ok()
+ && route_hop.cltv_expiry_delta as u16
+ >= update_opt.as_ref().unwrap().contents.cltv_expiry_delta
+ },
14 => false, // expiry_too_soon; always valid?
20 => update_opt.as_ref().unwrap().contents.flags & 2 == 0,
_ => false, // unknown error code; take channel_update as valid
if let Ok(chan_update) = update_opt {
// Make sure the ChannelUpdate contains the expected
// short channel id.
- if failing_route_hop.short_channel_id == chan_update.contents.short_channel_id {
+ if failing_route_hop.short_channel_id
+ == chan_update.contents.short_channel_id
+ {
short_channel_id = Some(failing_route_hop.short_channel_id);
} else {
log_info!(logger, "Node provided a channel_update for which it was not authoritative, ignoring.");
}
- network_update = Some(NetworkUpdate::ChannelUpdateMessage {
- msg: chan_update,
- })
+ network_update =
+ Some(NetworkUpdate::ChannelUpdateMessage { msg: chan_update })
} else {
// The node in question intentionally encoded a 0-length channel update. This is
// likely due to https://github.com/ElementsProject/lightning/issues/6200.
// If the channel_update had a non-zero length (i.e. was
// present) but we couldn't read it, treat it as a total
// node failure.
- log_info!(logger,
+ log_info!(
+ logger,
"Failed to read a channel_update of len {} in an onion",
- update_slice.len());
+ update_slice.len()
+ );
}
}
}
// Only blame the hop when a value in the HTLC doesn't match the corresponding value in the
// onion.
short_channel_id = match error_code & 0xff {
- 18|19 => Some(route_hop.short_channel_id),
+ 18 | 19 => Some(route_hop.short_channel_id),
_ => None,
};
} else {
// We can't understand their error messages and they failed to forward...they probably can't
// understand our forwards so it's really not worth trying any further.
- network_update = Some(NetworkUpdate::NodeFailure {
- node_id: route_hop.pubkey,
- is_permanent: true,
- });
+ network_update =
+ Some(NetworkUpdate::NodeFailure { node_id: route_hop.pubkey, is_permanent: true });
short_channel_id = Some(route_hop.short_channel_id);
}
res = Some(FailureLearnings {
- network_update, short_channel_id,
+ network_update,
+ short_channel_id,
payment_failed_permanently: error_code & PERM == PERM && is_from_final_node,
- failed_within_blinded_path: false
+ failed_within_blinded_path: false,
});
let (description, title) = errors::get_onion_error_description(error_code);
if debug_field_size > 0 && err_packet.failuremsg.len() >= 4 + debug_field_size {
- log_info!(logger, "Onion Error[from {}: {}({:#x}) {}({})] {}", route_hop.pubkey, title, error_code, debug_field, log_bytes!(&err_packet.failuremsg[4..4+debug_field_size]), description);
+ log_info!(
+ logger,
+ "Onion Error[from {}: {}({:#x}) {}({})] {}",
+ route_hop.pubkey,
+ title,
+ error_code,
+ debug_field,
+ log_bytes!(&err_packet.failuremsg[4..4 + debug_field_size]),
+ description
+ );
} else {
- log_info!(logger, "Onion Error[from {}: {}({:#x})] {}", route_hop.pubkey, title, error_code, description);
+ log_info!(
+ logger,
+ "Onion Error[from {}: {}({:#x})] {}",
+ route_hop.pubkey,
+ title,
+ error_code,
+ description
+ );
}
- }).expect("Route that we sent via spontaneously grew invalid keys in the middle of it?");
+ };
+
+ construct_onion_keys_callback(secp_ctx, &path, session_priv, callback)
+ .expect("Route that we sent via spontaneously grew invalid keys in the middle of it?");
+
if let Some(FailureLearnings {
- network_update, short_channel_id, payment_failed_permanently, failed_within_blinded_path
- }) = res {
+ network_update,
+ short_channel_id,
+ payment_failed_permanently,
+ failed_within_blinded_path,
+ }) = res
+ {
DecodedOnionFailure {
- network_update, short_channel_id, payment_failed_permanently, failed_within_blinded_path,
+ network_update,
+ short_channel_id,
+ payment_failed_permanently,
+ failed_within_blinded_path,
#[cfg(test)]
onion_error_code: error_code_ret,
#[cfg(test)]
- onion_error_data: error_packet_ret
+ onion_error_data: error_packet_ret,
}
} else {
// only not set either packet unparseable or hmac does not match with any
// payment not retryable only when garbage is from the final node
DecodedOnionFailure {
- network_update: None, short_channel_id: None, payment_failed_permanently: is_from_final_node,
+ network_update: None,
+ short_channel_id: None,
+ payment_failed_permanently: is_from_final_node,
failed_within_blinded_path: false,
#[cfg(test)]
onion_error_code: None,
#[cfg(test)]
- onion_error_data: None
+ onion_error_data: None,
}
}
}
#[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug
#[cfg_attr(test, derive(PartialEq))]
enum HTLCFailReasonRepr {
- LightningError {
- err: msgs::OnionErrorPacket,
- },
- Reason {
- failure_code: u16,
- data: Vec<u8>,
- }
+ LightningError { err: msgs::OnionErrorPacket },
+ Reason { failure_code: u16, data: Vec<u8> },
}
impl core::fmt::Debug for HTLCFailReason {
},
HTLCFailReasonRepr::LightningError { .. } => {
write!(f, "pre-built LightningError")
- }
+ },
}
}
}
;);
impl HTLCFailReason {
+ #[rustfmt::skip]
pub(super) fn reason(failure_code: u16, data: Vec<u8>) -> Self {
const BADONION: u16 = 0x8000;
const PERM: u16 = 0x4000;
Self(HTLCFailReasonRepr::LightningError { err: msg.reason.clone() })
}
- pub(super) fn get_encrypted_failure_packet(&self, incoming_packet_shared_secret: &[u8; 32], phantom_shared_secret: &Option<[u8; 32]>)
- -> msgs::OnionErrorPacket {
+ pub(super) fn get_encrypted_failure_packet(
+ &self, incoming_packet_shared_secret: &[u8; 32], phantom_shared_secret: &Option<[u8; 32]>,
+ ) -> msgs::OnionErrorPacket {
match self.0 {
HTLCFailReasonRepr::Reason { ref failure_code, ref data } => {
if let Some(phantom_ss) = phantom_shared_secret {
- let phantom_packet = build_failure_packet(phantom_ss, *failure_code, &data[..]).encode();
- let encrypted_phantom_packet = encrypt_failure_packet(phantom_ss, &phantom_packet);
- encrypt_failure_packet(incoming_packet_shared_secret, &encrypted_phantom_packet.data[..])
+ let phantom_packet =
+ build_failure_packet(phantom_ss, *failure_code, &data[..]).encode();
+ let encrypted_phantom_packet =
+ encrypt_failure_packet(phantom_ss, &phantom_packet);
+ encrypt_failure_packet(
+ incoming_packet_shared_secret,
+ &encrypted_phantom_packet.data[..],
+ )
} else {
- let packet = build_failure_packet(incoming_packet_shared_secret, *failure_code, &data[..]).encode();
+ let packet = build_failure_packet(
+ incoming_packet_shared_secret,
+ *failure_code,
+ &data[..],
+ )
+ .encode();
encrypt_failure_packet(incoming_packet_shared_secret, &packet)
}
},
HTLCFailReasonRepr::LightningError { ref err } => {
encrypt_failure_packet(incoming_packet_shared_secret, &err.data)
- }
+ },
}
}
pub(super) fn decode_onion_failure<T: secp256k1::Signing, L: Deref>(
- &self, secp_ctx: &Secp256k1<T>, logger: &L, htlc_source: &HTLCSource
- ) -> DecodedOnionFailure where L::Target: Logger {
+ &self, secp_ctx: &Secp256k1<T>, logger: &L, htlc_source: &HTLCSource,
+ ) -> DecodedOnionFailure
+ where
+ L::Target: Logger,
+ {
match self.0 {
HTLCFailReasonRepr::LightningError { ref err } => {
process_onion_failure(secp_ctx, logger, &htlc_source, err.data.clone())
#[cfg(test)]
onion_error_data: Some(data.clone()),
}
- } else { unreachable!(); }
- }
+ } else {
+ unreachable!();
+ }
+ },
}
}
}
}
impl NextPacketBytes for FixedSizeOnionPacket {
- fn new(_len: usize) -> Self {
+ fn new(_len: usize) -> Self {
Self([0 as u8; ONION_DATA_LEN])
}
}
},
}
+impl Hop {
+ pub(crate) fn is_intro_node_blinded_forward(&self) -> bool {
+ match self {
+ Self::Forward {
+ next_hop_data:
+ msgs::InboundOnionPayload::BlindedForward {
+ intro_node_blinding_point: Some(_), ..
+ },
+ ..
+ } => true,
+ _ => false,
+ }
+ }
+}
+
/// Error returned when we fail to decode the onion packet.
#[derive(Debug)]
pub(crate) enum OnionDecodeErr {
/// The HMAC of the onion packet did not match the hop data.
- Malformed {
- err_msg: &'static str,
- err_code: u16,
- },
+ Malformed { err_msg: &'static str, err_code: u16 },
/// We failed to decode the onion payload.
- Relay {
- err_msg: &'static str,
- err_code: u16,
- },
+ Relay { err_msg: &'static str, err_code: u16 },
}
pub(crate) fn decode_next_payment_hop<NS: Deref>(
shared_secret: [u8; 32], hop_data: &[u8], hmac_bytes: [u8; 32], payment_hash: PaymentHash,
blinding_point: Option<PublicKey>, node_signer: &NS,
-) -> Result<Hop, OnionDecodeErr> where NS::Target: NodeSigner {
+) -> Result<Hop, OnionDecodeErr>
+where
+ NS::Target: NodeSigner,
+{
match decode_next_hop(
- shared_secret, hop_data, hmac_bytes, Some(payment_hash), (blinding_point, node_signer)
+ shared_secret,
+ hop_data,
+ hmac_bytes,
+ Some(payment_hash),
+ (blinding_point, node_signer),
) {
Ok((next_hop_data, None)) => Ok(Hop::Receive(next_hop_data)),
Ok((next_hop_data, Some((next_hop_hmac, FixedSizeOnionPacket(new_packet_bytes))))) => {
- Ok(Hop::Forward {
- next_hop_data,
- next_hop_hmac,
- new_packet_bytes
- })
+ Ok(Hop::Forward { next_hop_data, next_hop_hmac, new_packet_bytes })
},
Err(e) => Err(e),
}
pub fn create_payment_onion<T: secp256k1::Signing>(
secp_ctx: &Secp256k1<T>, path: &Path, session_priv: &SecretKey, total_msat: u64,
recipient_onion: RecipientOnionFields, cur_block_height: u32, payment_hash: &PaymentHash,
- keysend_preimage: &Option<PaymentPreimage>, prng_seed: [u8; 32]
+ keysend_preimage: &Option<PaymentPreimage>, prng_seed: [u8; 32],
) -> Result<(msgs::OnionPacket, u64, u32), APIError> {
- let onion_keys = construct_onion_keys(&secp_ctx, &path, &session_priv)
- .map_err(|_| APIError::InvalidRoute{
- err: "Pubkey along hop was maliciously selected".to_owned()
- })?;
+ let onion_keys = construct_onion_keys(&secp_ctx, &path, &session_priv).map_err(|_| {
+ APIError::InvalidRoute { err: "Pubkey along hop was maliciously selected".to_owned() }
+ })?;
let (onion_payloads, htlc_msat, htlc_cltv) = build_onion_payloads(
- &path, total_msat, recipient_onion, cur_block_height, keysend_preimage
+ &path,
+ total_msat,
+ recipient_onion,
+ cur_block_height,
+ keysend_preimage,
)?;
let onion_packet = construct_onion_packet(onion_payloads, onion_keys, prng_seed, payment_hash)
- .map_err(|_| APIError::InvalidRoute{
- err: "Route size too large considering onion data".to_owned()
+ .map_err(|_| APIError::InvalidRoute {
+ err: "Route size too large considering onion data".to_owned(),
})?;
Ok((onion_packet, htlc_msat, htlc_cltv))
}
-pub(crate) fn decode_next_untagged_hop<T, R: ReadableArgs<T>, N: NextPacketBytes>(shared_secret: [u8; 32], hop_data: &[u8], hmac_bytes: [u8; 32], read_args: T) -> Result<(R, Option<([u8; 32], N)>), OnionDecodeErr> {
+pub(crate) fn decode_next_untagged_hop<T, R: ReadableArgs<T>, N: NextPacketBytes>(
+ shared_secret: [u8; 32], hop_data: &[u8], hmac_bytes: [u8; 32], read_args: T,
+) -> Result<(R, Option<([u8; 32], N)>), OnionDecodeErr> {
decode_next_hop(shared_secret, hop_data, hmac_bytes, None, read_args)
}
-fn decode_next_hop<T, R: ReadableArgs<T>, N: NextPacketBytes>(shared_secret: [u8; 32], hop_data: &[u8], hmac_bytes: [u8; 32], payment_hash: Option<PaymentHash>, read_args: T) -> Result<(R, Option<([u8; 32], N)>), OnionDecodeErr> {
+fn decode_next_hop<T, R: ReadableArgs<T>, N: NextPacketBytes>(
+ shared_secret: [u8; 32], hop_data: &[u8], hmac_bytes: [u8; 32],
+ payment_hash: Option<PaymentHash>, read_args: T,
+) -> Result<(R, Option<([u8; 32], N)>), OnionDecodeErr> {
let (rho, mu) = gen_rho_mu_from_shared_secret(&shared_secret);
let mut hmac = HmacEngine::<Sha256>::new(&mu);
hmac.input(hop_data);
match R::read(&mut chacha_stream, read_args) {
Err(err) => {
let error_code = match err {
- msgs::DecodeError::UnknownVersion => 0x4000 | 1, // unknown realm byte
- msgs::DecodeError::UnknownRequiredFeature|
- msgs::DecodeError::InvalidValue|
- msgs::DecodeError::ShortRead => 0x4000 | 22, // invalid_onion_payload
- _ => 0x2000 | 2, // Should never happen
+ // Unknown realm byte
+ msgs::DecodeError::UnknownVersion => 0x4000 | 1,
+ // invalid_onion_payload
+ msgs::DecodeError::UnknownRequiredFeature
+ | msgs::DecodeError::InvalidValue
+ | msgs::DecodeError::ShortRead => 0x4000 | 22,
+ // Should never happen
+ _ => 0x2000 | 2,
};
return Err(OnionDecodeErr::Relay {
err_msg: "Unable to decode our hop data",
// Once we've emptied the set of bytes our peer gave us, encrypt 0 bytes until we
// fill the onion hop data we'll forward to our next-hop peer.
chacha_stream.chacha.process_in_place(&mut new_packet_bytes.as_mut()[read_pos..]);
- return Ok((msg, Some((hmac, new_packet_bytes)))) // This packet needs forwarding
+ return Ok((msg, Some((hmac, new_packet_bytes)))); // This packet needs forwarding
}
},
}
#[cfg(test)]
mod tests {
use crate::io;
- use crate::prelude::*;
- use crate::ln::PaymentHash;
use crate::ln::features::{ChannelFeatures, NodeFeatures};
- use crate::routing::router::{Path, Route, RouteHop};
use crate::ln::msgs;
- use crate::util::ser::{Writeable, Writer, VecWriter};
+ use crate::ln::PaymentHash;
+ use crate::routing::router::{Path, Route, RouteHop};
+ use crate::util::ser::{VecWriter, Writeable, Writer};
+
+ #[allow(unused_imports)]
+ use crate::prelude::*;
use bitcoin::hashes::hex::FromHex;
use bitcoin::secp256k1::Secp256k1;
- use bitcoin::secp256k1::{PublicKey,SecretKey};
+ use bitcoin::secp256k1::{PublicKey, SecretKey};
use super::OnionKeys;
fn get_test_session_key() -> SecretKey {
- SecretKey::from_slice(&<Vec<u8>>::from_hex("4141414141414141414141414141414141414141414141414141414141414141").unwrap()[..]).unwrap()
+ let hex = "4141414141414141414141414141414141414141414141414141414141414141";
+ SecretKey::from_slice(&<Vec<u8>>::from_hex(hex).unwrap()[..]).unwrap()
}
fn build_test_onion_keys() -> Vec<OnionKeys> {
route_params: None,
};
- let onion_keys = super::construct_onion_keys(&secp_ctx, &route.paths[0], &get_test_session_key()).unwrap();
+ let onion_keys =
+ super::construct_onion_keys(&secp_ctx, &route.paths[0], &get_test_session_key())
+ .unwrap();
assert_eq!(onion_keys.len(), route.paths[0].hops.len());
onion_keys
}
// Test generation of ephemeral keys and secrets. These values used to be part of the BOLT4
// test vectors, but have since been removed. We keep them as they provide test coverage.
- assert_eq!(onion_keys[0].shared_secret.secret_bytes(), <Vec<u8>>::from_hex("53eb63ea8a3fec3b3cd433b85cd62a4b145e1dda09391b348c4e1cd36a03ea66").unwrap()[..]);
- assert_eq!(onion_keys[0].blinding_factor[..], <Vec<u8>>::from_hex("2ec2e5da605776054187180343287683aa6a51b4b1c04d6dd49c45d8cffb3c36").unwrap()[..]);
- assert_eq!(onion_keys[0].ephemeral_pubkey.serialize()[..], <Vec<u8>>::from_hex("02eec7245d6b7d2ccb30380bfbe2a3648cd7a942653f5aa340edcea1f283686619").unwrap()[..]);
- assert_eq!(onion_keys[0].rho, <Vec<u8>>::from_hex("ce496ec94def95aadd4bec15cdb41a740c9f2b62347c4917325fcc6fb0453986").unwrap()[..]);
- assert_eq!(onion_keys[0].mu, <Vec<u8>>::from_hex("b57061dc6d0a2b9f261ac410c8b26d64ac5506cbba30267a649c28c179400eba").unwrap()[..]);
-
- assert_eq!(onion_keys[1].shared_secret.secret_bytes(), <Vec<u8>>::from_hex("a6519e98832a0b179f62123b3567c106db99ee37bef036e783263602f3488fae").unwrap()[..]);
- assert_eq!(onion_keys[1].blinding_factor[..], <Vec<u8>>::from_hex("bf66c28bc22e598cfd574a1931a2bafbca09163df2261e6d0056b2610dab938f").unwrap()[..]);
- assert_eq!(onion_keys[1].ephemeral_pubkey.serialize()[..], <Vec<u8>>::from_hex("028f9438bfbf7feac2e108d677e3a82da596be706cc1cf342b75c7b7e22bf4e6e2").unwrap()[..]);
- assert_eq!(onion_keys[1].rho, <Vec<u8>>::from_hex("450ffcabc6449094918ebe13d4f03e433d20a3d28a768203337bc40b6e4b2c59").unwrap()[..]);
- assert_eq!(onion_keys[1].mu, <Vec<u8>>::from_hex("05ed2b4a3fb023c2ff5dd6ed4b9b6ea7383f5cfe9d59c11d121ec2c81ca2eea9").unwrap()[..]);
-
- assert_eq!(onion_keys[2].shared_secret.secret_bytes(), <Vec<u8>>::from_hex("3a6b412548762f0dbccce5c7ae7bb8147d1caf9b5471c34120b30bc9c04891cc").unwrap()[..]);
- assert_eq!(onion_keys[2].blinding_factor[..], <Vec<u8>>::from_hex("a1f2dadd184eb1627049673f18c6325814384facdee5bfd935d9cb031a1698a5").unwrap()[..]);
- assert_eq!(onion_keys[2].ephemeral_pubkey.serialize()[..], <Vec<u8>>::from_hex("03bfd8225241ea71cd0843db7709f4c222f62ff2d4516fd38b39914ab6b83e0da0").unwrap()[..]);
- assert_eq!(onion_keys[2].rho, <Vec<u8>>::from_hex("11bf5c4f960239cb37833936aa3d02cea82c0f39fd35f566109c41f9eac8deea").unwrap()[..]);
- assert_eq!(onion_keys[2].mu, <Vec<u8>>::from_hex("caafe2820fa00eb2eeb78695ae452eba38f5a53ed6d53518c5c6edf76f3f5b78").unwrap()[..]);
-
- assert_eq!(onion_keys[3].shared_secret.secret_bytes(), <Vec<u8>>::from_hex("21e13c2d7cfe7e18836df50872466117a295783ab8aab0e7ecc8c725503ad02d").unwrap()[..]);
- assert_eq!(onion_keys[3].blinding_factor[..], <Vec<u8>>::from_hex("7cfe0b699f35525029ae0fa437c69d0f20f7ed4e3916133f9cacbb13c82ff262").unwrap()[..]);
- assert_eq!(onion_keys[3].ephemeral_pubkey.serialize()[..], <Vec<u8>>::from_hex("031dde6926381289671300239ea8e57ffaf9bebd05b9a5b95beaf07af05cd43595").unwrap()[..]);
- assert_eq!(onion_keys[3].rho, <Vec<u8>>::from_hex("cbe784ab745c13ff5cffc2fbe3e84424aa0fd669b8ead4ee562901a4a4e89e9e").unwrap()[..]);
- assert_eq!(onion_keys[3].mu, <Vec<u8>>::from_hex("5052aa1b3d9f0655a0932e50d42f0c9ba0705142c25d225515c45f47c0036ee9").unwrap()[..]);
-
- assert_eq!(onion_keys[4].shared_secret.secret_bytes(), <Vec<u8>>::from_hex("b5756b9b542727dbafc6765a49488b023a725d631af688fc031217e90770c328").unwrap()[..]);
- assert_eq!(onion_keys[4].blinding_factor[..], <Vec<u8>>::from_hex("c96e00dddaf57e7edcd4fb5954be5b65b09f17cb6d20651b4e90315be5779205").unwrap()[..]);
- assert_eq!(onion_keys[4].ephemeral_pubkey.serialize()[..], <Vec<u8>>::from_hex("03a214ebd875aab6ddfd77f22c5e7311d7f77f17a169e599f157bbcdae8bf071f4").unwrap()[..]);
- assert_eq!(onion_keys[4].rho, <Vec<u8>>::from_hex("034e18b8cc718e8af6339106e706c52d8df89e2b1f7e9142d996acf88df8799b").unwrap()[..]);
- assert_eq!(onion_keys[4].mu, <Vec<u8>>::from_hex("8e45e5c61c2b24cb6382444db6698727afb063adecd72aada233d4bf273d975a").unwrap()[..]);
+ let hex = "53eb63ea8a3fec3b3cd433b85cd62a4b145e1dda09391b348c4e1cd36a03ea66";
+ assert_eq!(
+ onion_keys[0].shared_secret.secret_bytes(),
+ <Vec<u8>>::from_hex(hex).unwrap()[..]
+ );
+
+ let hex = "2ec2e5da605776054187180343287683aa6a51b4b1c04d6dd49c45d8cffb3c36";
+ assert_eq!(onion_keys[0].blinding_factor[..], <Vec<u8>>::from_hex(hex).unwrap()[..]);
+
+ let hex = "02eec7245d6b7d2ccb30380bfbe2a3648cd7a942653f5aa340edcea1f283686619";
+ assert_eq!(
+ onion_keys[0].ephemeral_pubkey.serialize()[..],
+ <Vec<u8>>::from_hex(hex).unwrap()[..]
+ );
+
+ let hex = "ce496ec94def95aadd4bec15cdb41a740c9f2b62347c4917325fcc6fb0453986";
+ assert_eq!(onion_keys[0].rho, <Vec<u8>>::from_hex(hex).unwrap()[..]);
+
+ let hex = "b57061dc6d0a2b9f261ac410c8b26d64ac5506cbba30267a649c28c179400eba";
+ assert_eq!(onion_keys[0].mu, <Vec<u8>>::from_hex(hex).unwrap()[..]);
+
+ let hex = "a6519e98832a0b179f62123b3567c106db99ee37bef036e783263602f3488fae";
+ assert_eq!(
+ onion_keys[1].shared_secret.secret_bytes(),
+ <Vec<u8>>::from_hex(hex).unwrap()[..]
+ );
+
+ let hex = "bf66c28bc22e598cfd574a1931a2bafbca09163df2261e6d0056b2610dab938f";
+ assert_eq!(onion_keys[1].blinding_factor[..], <Vec<u8>>::from_hex(hex).unwrap()[..]);
+
+ let hex = "028f9438bfbf7feac2e108d677e3a82da596be706cc1cf342b75c7b7e22bf4e6e2";
+ assert_eq!(
+ onion_keys[1].ephemeral_pubkey.serialize()[..],
+ <Vec<u8>>::from_hex(hex).unwrap()[..]
+ );
+
+ let hex = "450ffcabc6449094918ebe13d4f03e433d20a3d28a768203337bc40b6e4b2c59";
+ assert_eq!(onion_keys[1].rho, <Vec<u8>>::from_hex(hex).unwrap()[..]);
+
+ let hex = "05ed2b4a3fb023c2ff5dd6ed4b9b6ea7383f5cfe9d59c11d121ec2c81ca2eea9";
+ assert_eq!(onion_keys[1].mu, <Vec<u8>>::from_hex(hex).unwrap()[..]);
+
+ let hex = "3a6b412548762f0dbccce5c7ae7bb8147d1caf9b5471c34120b30bc9c04891cc";
+ assert_eq!(
+ onion_keys[2].shared_secret.secret_bytes(),
+ <Vec<u8>>::from_hex(hex).unwrap()[..]
+ );
+
+ let hex = "a1f2dadd184eb1627049673f18c6325814384facdee5bfd935d9cb031a1698a5";
+ assert_eq!(onion_keys[2].blinding_factor[..], <Vec<u8>>::from_hex(hex).unwrap()[..]);
+
+ let hex = "03bfd8225241ea71cd0843db7709f4c222f62ff2d4516fd38b39914ab6b83e0da0";
+ assert_eq!(
+ onion_keys[2].ephemeral_pubkey.serialize()[..],
+ <Vec<u8>>::from_hex(hex).unwrap()[..]
+ );
+
+ let hex = "11bf5c4f960239cb37833936aa3d02cea82c0f39fd35f566109c41f9eac8deea";
+ assert_eq!(onion_keys[2].rho, <Vec<u8>>::from_hex(hex).unwrap()[..]);
+
+ let hex = "caafe2820fa00eb2eeb78695ae452eba38f5a53ed6d53518c5c6edf76f3f5b78";
+ assert_eq!(onion_keys[2].mu, <Vec<u8>>::from_hex(hex).unwrap()[..]);
+
+ let hex = "21e13c2d7cfe7e18836df50872466117a295783ab8aab0e7ecc8c725503ad02d";
+ assert_eq!(
+ onion_keys[3].shared_secret.secret_bytes(),
+ <Vec<u8>>::from_hex(hex).unwrap()[..]
+ );
+
+ let hex = "7cfe0b699f35525029ae0fa437c69d0f20f7ed4e3916133f9cacbb13c82ff262";
+ assert_eq!(onion_keys[3].blinding_factor[..], <Vec<u8>>::from_hex(hex).unwrap()[..]);
+
+ let hex = "031dde6926381289671300239ea8e57ffaf9bebd05b9a5b95beaf07af05cd43595";
+ assert_eq!(
+ onion_keys[3].ephemeral_pubkey.serialize()[..],
+ <Vec<u8>>::from_hex(hex).unwrap()[..]
+ );
+
+ let hex = "cbe784ab745c13ff5cffc2fbe3e84424aa0fd669b8ead4ee562901a4a4e89e9e";
+ assert_eq!(onion_keys[3].rho, <Vec<u8>>::from_hex(hex).unwrap()[..]);
+
+ let hex = "5052aa1b3d9f0655a0932e50d42f0c9ba0705142c25d225515c45f47c0036ee9";
+ assert_eq!(onion_keys[3].mu, <Vec<u8>>::from_hex(hex).unwrap()[..]);
+
+ let hex = "b5756b9b542727dbafc6765a49488b023a725d631af688fc031217e90770c328";
+ assert_eq!(
+ onion_keys[4].shared_secret.secret_bytes(),
+ <Vec<u8>>::from_hex(hex).unwrap()[..]
+ );
+
+ let hex = "c96e00dddaf57e7edcd4fb5954be5b65b09f17cb6d20651b4e90315be5779205";
+ assert_eq!(onion_keys[4].blinding_factor[..], <Vec<u8>>::from_hex(hex).unwrap()[..]);
+
+ let hex = "03a214ebd875aab6ddfd77f22c5e7311d7f77f17a169e599f157bbcdae8bf071f4";
+ assert_eq!(
+ onion_keys[4].ephemeral_pubkey.serialize()[..],
+ <Vec<u8>>::from_hex(hex).unwrap()[..]
+ );
+
+ let hex = "034e18b8cc718e8af6339106e706c52d8df89e2b1f7e9142d996acf88df8799b";
+ assert_eq!(onion_keys[4].rho, <Vec<u8>>::from_hex(hex).unwrap()[..]);
+
+ let hex = "8e45e5c61c2b24cb6382444db6698727afb063adecd72aada233d4bf273d975a";
+ assert_eq!(onion_keys[4].mu, <Vec<u8>>::from_hex(hex).unwrap()[..]);
// Packet creation test vectors from BOLT 4 (see
// https://github.com/lightning/bolts/blob/16973e2b857e853308cafd59e42fa830d75b1642/bolt04/onion-test.json).
let mut w = VecWriter(Vec::new());
payloads[0].write(&mut w).unwrap();
let hop_1_serialized_payload = w.0;
- let expected_serialized_hop_1_payload = &<Vec<u8>>::from_hex("1202023a98040205dc06080000000000000001").unwrap()[..];
+ let hex = "1202023a98040205dc06080000000000000001";
+ let expected_serialized_hop_1_payload = &<Vec<u8>>::from_hex(hex).unwrap()[..];
assert_eq!(hop_1_serialized_payload, expected_serialized_hop_1_payload);
w = VecWriter(Vec::new());
payloads[2].write(&mut w).unwrap();
let hop_3_serialized_payload = w.0;
- let expected_serialized_hop_3_payload = &<Vec<u8>>::from_hex("12020230d4040204e206080000000000000003").unwrap()[..];
+ let hex = "12020230d4040204e206080000000000000003";
+ let expected_serialized_hop_3_payload = &<Vec<u8>>::from_hex(hex).unwrap()[..];
assert_eq!(hop_3_serialized_payload, expected_serialized_hop_3_payload);
w = VecWriter(Vec::new());
payloads[3].write(&mut w).unwrap();
let hop_4_serialized_payload = w.0;
- let expected_serialized_hop_4_payload = &<Vec<u8>>::from_hex("1202022710040203e806080000000000000004").unwrap()[..];
+ let hex = "1202022710040203e806080000000000000004";
+ let expected_serialized_hop_4_payload = &<Vec<u8>>::from_hex(hex).unwrap()[..];
assert_eq!(hop_4_serialized_payload, expected_serialized_hop_4_payload);
- let pad_keytype_seed = super::gen_pad_from_shared_secret(&get_test_session_key().secret_bytes());
+ let pad_keytype_seed =
+ super::gen_pad_from_shared_secret(&get_test_session_key().secret_bytes());
- let packet: msgs::OnionPacket = super::construct_onion_packet_with_writable_hopdata::<_>(payloads, onion_keys, pad_keytype_seed, &PaymentHash([0x42; 32])).unwrap();
+ let packet: msgs::OnionPacket = super::construct_onion_packet_with_writable_hopdata::<_>(
+ payloads,
+ onion_keys,
+ pad_keytype_seed,
+ &PaymentHash([0x42; 32]),
+ )
+ .unwrap();
- assert_eq!(packet.encode(), <Vec<u8>>::from_hex("0002EEC7245D6B7D2CCB30380BFBE2A3648CD7A942653F5AA340EDCEA1F283686619F7F3416A5AA36DC7EEB3EC6D421E9615471AB870A33AC07FA5D5A51DF0A8823AABE3FEA3F90D387529D4F72837F9E687230371CCD8D263072206DBED0234F6505E21E282ABD8C0E4F5B9FF8042800BBAB065036EADD0149B37F27DDE664725A49866E052E809D2B0198AB9610FAA656BBF4EC516763A59F8F42C171B179166BA38958D4F51B39B3E98706E2D14A2DAFD6A5DF808093ABFCA5AEAACA16EDED5DB7D21FB0294DD1A163EDF0FB445D5C8D7D688D6DD9C541762BF5A5123BF9939D957FE648416E88F1B0928BFA034982B22548E1A4D922690EECF546275AFB233ACF4323974680779F1A964CFE687456035CC0FBA8A5428430B390F0057B6D1FE9A8875BFA89693EEB838CE59F09D207A503EE6F6299C92D6361BC335FCBF9B5CD44747AADCE2CE6069CFDC3D671DAEF9F8AE590CF93D957C9E873E9A1BC62D9640DC8FC39C14902D49A1C80239B6C5B7FD91D05878CBF5FFC7DB2569F47C43D6C0D27C438ABFF276E87364DEB8858A37E5A62C446AF95D8B786EAF0B5FCF78D98B41496794F8DCAAC4EEF34B2ACFB94C7E8C32A9E9866A8FA0B6F2A06F00A1CCDE569F97EEC05C803BA7500ACC96691D8898D73D8E6A47B8F43C3D5DE74458D20EDA61474C426359677001FBD75A74D7D5DB6CB4FEB83122F133206203E4E2D293F838BF8C8B3A29ACB321315100B87E80E0EDB272EE80FDA944E3FB6084ED4D7F7C7D21C69D9DA43D31A90B70693F9B0CC3EAC74C11AB8FF655905688916CFA4EF0BD04135F2E50B7C689A21D04E8E981E74C6058188B9B1F9DFC3EEC6838E9FFBCF22CE738D8A177C19318DFFEF090CEE67E12DE1A3E2A39F61247547BA5257489CBC11D7D91ED34617FCC42F7A9DA2E3CF31A94A210A1018143173913C38F60E62B24BF0D7518F38B5BAB3E6A1F8AEB35E31D6442C8ABB5178EFC892D2E787D79C6AD9E2FC271792983FA9955AC4D1D84A36C024071BC6E431B625519D556AF38185601F70E29035EA6A09C8B676C9D88CF7E05E0F17098B584C4168735940263F940033A220F40BE4C85344128B14BEB9E75696DB37014107801A59B13E89CD9D2258C169D523BE6D31552C44C82FF4BB18EC9F099F3BF0E5B1BB2BA9A87D7E26F98D294927B600B5529C47E04D98956677CBCEE8FA2B60F49776D8B8C367465B7C626DA53700684FB6C918EAD0EAB8360E4F60EDD25B4F43816A75ECF70F909301825B512469F8389D79402311D8AECB7B3EF8599E79485A4388D87744D899F7C47EE644361E17040A7958C8911BE6F463AB6A9B2AFACD688EC55EF517B38F1339EFC54487232798BB2552
2FF4572FF68567FE830F92F7B8113EFCE3E98C3FFFBAEDCE4FD8B50E41DA97C0C08E423A72689CC68E68F752A5E3A9003E64E35C957CA2E1C48BB6F64B05F56B70B575AD2F278D57850A7AD568C24A4D32A3D74B29F03DC125488BC7C637DA582357F40B0A52D16B3B40BB2C2315D03360BC24209E20972C200566BCF3BBE5C5B0AEDD83132A8A4D5B4242BA370B6D67D9B67EB01052D132C7866B9CB502E44796D9D356E4E3CB47CC527322CD24976FE7C9257A2864151A38E568EF7A79F10D6EF27CC04CE382347A2488B1F404FDBF407FE1CA1C9D0D5649E34800E25E18951C98CAE9F43555EEF65FEE1EA8F15828807366C3B612CD5753BF9FB8FCED08855F742CDDD6F765F74254F03186683D646E6F09AC2805586C7CF11998357CAFC5DF3F285329366F475130C928B2DCEBA4AA383758E7A9D20705C4BB9DB619E2992F608A1BA65DB254BB389468741D0502E2588AEB54390AC600C19AF5C8E61383FC1BEBE0029E4474051E4EF908828DB9CCA13277EF65DB3FD47CCC2179126AAEFB627719F421E20").unwrap());
+ let hex = "0002EEC7245D6B7D2CCB30380BFBE2A3648CD7A942653F5AA340EDCEA1F283686619F7F3416A5AA36DC7EEB3EC6D421E9615471AB870A33AC07FA5D5A51DF0A8823AABE3FEA3F90D387529D4F72837F9E687230371CCD8D263072206DBED0234F6505E21E282ABD8C0E4F5B9FF8042800BBAB065036EADD0149B37F27DDE664725A49866E052E809D2B0198AB9610FAA656BBF4EC516763A59F8F42C171B179166BA38958D4F51B39B3E98706E2D14A2DAFD6A5DF808093ABFCA5AEAACA16EDED5DB7D21FB0294DD1A163EDF0FB445D5C8D7D688D6DD9C541762BF5A5123BF9939D957FE648416E88F1B0928BFA034982B22548E1A4D922690EECF546275AFB233ACF4323974680779F1A964CFE687456035CC0FBA8A5428430B390F0057B6D1FE9A8875BFA89693EEB838CE59F09D207A503EE6F6299C92D6361BC335FCBF9B5CD44747AADCE2CE6069CFDC3D671DAEF9F8AE590CF93D957C9E873E9A1BC62D9640DC8FC39C14902D49A1C80239B6C5B7FD91D05878CBF5FFC7DB2569F47C43D6C0D27C438ABFF276E87364DEB8858A37E5A62C446AF95D8B786EAF0B5FCF78D98B41496794F8DCAAC4EEF34B2ACFB94C7E8C32A9E9866A8FA0B6F2A06F00A1CCDE569F97EEC05C803BA7500ACC96691D8898D73D8E6A47B8F43C3D5DE74458D20EDA61474C426359677001FBD75A74D7D5DB6CB4FEB83122F133206203E4E2D293F838BF8C8B3A29ACB321315100B87E80E0EDB272EE80FDA944E3FB6084ED4D7F7C7D21C69D9DA43D31A90B70693F9B0CC3EAC74C11AB8FF655905688916CFA4EF0BD04135F2E50B7C689A21D04E8E981E74C6058188B9B1F9DFC3EEC6838E9FFBCF22CE738D8A177C19318DFFEF090CEE67E12DE1A3E2A39F61247547BA5257489CBC11D7D91ED34617FCC42F7A9DA2E3CF31A94A210A1018143173913C38F60E62B24BF0D7518F38B5BAB3E6A1F8AEB35E31D6442C8ABB5178EFC892D2E787D79C6AD9E2FC271792983FA9955AC4D1D84A36C024071BC6E431B625519D556AF38185601F70E29035EA6A09C8B676C9D88CF7E05E0F17098B584C4168735940263F940033A220F40BE4C85344128B14BEB9E75696DB37014107801A59B13E89CD9D2258C169D523BE6D31552C44C82FF4BB18EC9F099F3BF0E5B1BB2BA9A87D7E26F98D294927B600B5529C47E04D98956677CBCEE8FA2B60F49776D8B8C367465B7C626DA53700684FB6C918EAD0EAB8360E4F60EDD25B4F43816A75ECF70F909301825B512469F8389D79402311D8AECB7B3EF8599E79485A4388D87744D899F7C47EE644361E17040A7958C8911BE6F463AB6A9B2AFACD688EC55EF517B38F1339EFC54487232798BB25522FF4572FF68567FE830F92F7B8113EFCE3E98C
3FFFBAEDCE4FD8B50E41DA97C0C08E423A72689CC68E68F752A5E3A9003E64E35C957CA2E1C48BB6F64B05F56B70B575AD2F278D57850A7AD568C24A4D32A3D74B29F03DC125488BC7C637DA582357F40B0A52D16B3B40BB2C2315D03360BC24209E20972C200566BCF3BBE5C5B0AEDD83132A8A4D5B4242BA370B6D67D9B67EB01052D132C7866B9CB502E44796D9D356E4E3CB47CC527322CD24976FE7C9257A2864151A38E568EF7A79F10D6EF27CC04CE382347A2488B1F404FDBF407FE1CA1C9D0D5649E34800E25E18951C98CAE9F43555EEF65FEE1EA8F15828807366C3B612CD5753BF9FB8FCED08855F742CDDD6F765F74254F03186683D646E6F09AC2805586C7CF11998357CAFC5DF3F285329366F475130C928B2DCEBA4AA383758E7A9D20705C4BB9DB619E2992F608A1BA65DB254BB389468741D0502E2588AEB54390AC600C19AF5C8E61383FC1BEBE0029E4474051E4EF908828DB9CCA13277EF65DB3FD47CCC2179126AAEFB627719F421E20";
+ assert_eq!(packet.encode(), <Vec<u8>>::from_hex(hex).unwrap());
}
#[test]
// Returning Errors test vectors from BOLT 4
let onion_keys = build_test_onion_keys();
- let onion_error = super::build_failure_packet(onion_keys[4].shared_secret.as_ref(), 0x2002, &[0; 0]);
- assert_eq!(onion_error.encode(), <Vec<u8>>::from_hex("4c2fc8bc08510334b6833ad9c3e79cd1b52ae59dfe5c2a4b23ead50f09f7ee0b0002200200fe0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000").unwrap());
-
- let onion_packet_1 = super::encrypt_failure_packet(onion_keys[4].shared_secret.as_ref(), &onion_error.encode()[..]);
- assert_eq!(onion_packet_1.data, <Vec<u8>>::from_hex("a5e6bd0c74cb347f10cce367f949098f2457d14c046fd8a22cb96efb30b0fdcda8cb9168b50f2fd45edd73c1b0c8b33002df376801ff58aaa94000bf8a86f92620f343baef38a580102395ae3abf9128d1047a0736ff9b83d456740ebbb4aeb3aa9737f18fb4afb4aa074fb26c4d702f42968888550a3bded8c05247e045b866baef0499f079fdaeef6538f31d44deafffdfd3afa2fb4ca9082b8f1c465371a9894dd8c243fb4847e004f5256b3e90e2edde4c9fb3082ddfe4d1e734cacd96ef0706bf63c9984e22dc98851bcccd1c3494351feb458c9c6af41c0044bea3c47552b1d992ae542b17a2d0bba1a096c78d169034ecb55b6e3a7263c26017f033031228833c1daefc0dedb8cf7c3e37c9c37ebfe42f3225c326e8bcfd338804c145b16e34e4").unwrap());
+ let onion_error =
+ super::build_failure_packet(onion_keys[4].shared_secret.as_ref(), 0x2002, &[0; 0]);
+ let hex = "4c2fc8bc08510334b6833ad9c3e79cd1b52ae59dfe5c2a4b23ead50f09f7ee0b0002200200fe0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000";
+ assert_eq!(onion_error.encode(), <Vec<u8>>::from_hex(hex).unwrap());
+
+ let onion_packet_1 = super::encrypt_failure_packet(
+ onion_keys[4].shared_secret.as_ref(),
+ &onion_error.encode()[..],
+ );
+ let hex = "a5e6bd0c74cb347f10cce367f949098f2457d14c046fd8a22cb96efb30b0fdcda8cb9168b50f2fd45edd73c1b0c8b33002df376801ff58aaa94000bf8a86f92620f343baef38a580102395ae3abf9128d1047a0736ff9b83d456740ebbb4aeb3aa9737f18fb4afb4aa074fb26c4d702f42968888550a3bded8c05247e045b866baef0499f079fdaeef6538f31d44deafffdfd3afa2fb4ca9082b8f1c465371a9894dd8c243fb4847e004f5256b3e90e2edde4c9fb3082ddfe4d1e734cacd96ef0706bf63c9984e22dc98851bcccd1c3494351feb458c9c6af41c0044bea3c47552b1d992ae542b17a2d0bba1a096c78d169034ecb55b6e3a7263c26017f033031228833c1daefc0dedb8cf7c3e37c9c37ebfe42f3225c326e8bcfd338804c145b16e34e4";
+ assert_eq!(onion_packet_1.data, <Vec<u8>>::from_hex(hex).unwrap());
- let onion_packet_2 = super::encrypt_failure_packet(onion_keys[3].shared_secret.as_ref(), &onion_packet_1.data[..]);
- assert_eq!(onion_packet_2.data, <Vec<u8>>::from_hex("c49a1ce81680f78f5f2000cda36268de34a3f0a0662f55b4e837c83a8773c22aa081bab1616a0011585323930fa5b9fae0c85770a2279ff59ec427ad1bbff9001c0cd1497004bd2a0f68b50704cf6d6a4bf3c8b6a0833399a24b3456961ba00736785112594f65b6b2d44d9f5ea4e49b5e1ec2af978cbe31c67114440ac51a62081df0ed46d4a3df295da0b0fe25c0115019f03f15ec86fabb4c852f83449e812f141a9395b3f70b766ebbd4ec2fae2b6955bd8f32684c15abfe8fd3a6261e52650e8807a92158d9f1463261a925e4bfba44bd20b166d532f0017185c3a6ac7957adefe45559e3072c8dc35abeba835a8cb01a71a15c736911126f27d46a36168ca5ef7dccd4e2886212602b181463e0dd30185c96348f9743a02aca8ec27c0b90dca270").unwrap());
+ let onion_packet_2 = super::encrypt_failure_packet(
+ onion_keys[3].shared_secret.as_ref(),
+ &onion_packet_1.data[..],
+ );
+ let hex = "c49a1ce81680f78f5f2000cda36268de34a3f0a0662f55b4e837c83a8773c22aa081bab1616a0011585323930fa5b9fae0c85770a2279ff59ec427ad1bbff9001c0cd1497004bd2a0f68b50704cf6d6a4bf3c8b6a0833399a24b3456961ba00736785112594f65b6b2d44d9f5ea4e49b5e1ec2af978cbe31c67114440ac51a62081df0ed46d4a3df295da0b0fe25c0115019f03f15ec86fabb4c852f83449e812f141a9395b3f70b766ebbd4ec2fae2b6955bd8f32684c15abfe8fd3a6261e52650e8807a92158d9f1463261a925e4bfba44bd20b166d532f0017185c3a6ac7957adefe45559e3072c8dc35abeba835a8cb01a71a15c736911126f27d46a36168ca5ef7dccd4e2886212602b181463e0dd30185c96348f9743a02aca8ec27c0b90dca270";
+ assert_eq!(onion_packet_2.data, <Vec<u8>>::from_hex(hex).unwrap());
- let onion_packet_3 = super::encrypt_failure_packet(onion_keys[2].shared_secret.as_ref(), &onion_packet_2.data[..]);
- assert_eq!(onion_packet_3.data, <Vec<u8>>::from_hex("a5d3e8634cfe78b2307d87c6d90be6fe7855b4f2cc9b1dfb19e92e4b79103f61ff9ac25f412ddfb7466e74f81b3e545563cdd8f5524dae873de61d7bdfccd496af2584930d2b566b4f8d3881f8c043df92224f38cf094cfc09d92655989531524593ec6d6caec1863bdfaa79229b5020acc034cd6deeea1021c50586947b9b8e6faa83b81fbfa6133c0af5d6b07c017f7158fa94f0d206baf12dda6b68f785b773b360fd0497e16cc402d779c8d48d0fa6315536ef0660f3f4e1865f5b38ea49c7da4fd959de4e83ff3ab686f059a45c65ba2af4a6a79166aa0f496bf04d06987b6d2ea205bdb0d347718b9aeff5b61dfff344993a275b79717cd815b6ad4c0beb568c4ac9c36ff1c315ec1119a1993c4b61e6eaa0375e0aaf738ac691abd3263bf937e3").unwrap());
+ let onion_packet_3 = super::encrypt_failure_packet(
+ onion_keys[2].shared_secret.as_ref(),
+ &onion_packet_2.data[..],
+ );
+ let hex = "a5d3e8634cfe78b2307d87c6d90be6fe7855b4f2cc9b1dfb19e92e4b79103f61ff9ac25f412ddfb7466e74f81b3e545563cdd8f5524dae873de61d7bdfccd496af2584930d2b566b4f8d3881f8c043df92224f38cf094cfc09d92655989531524593ec6d6caec1863bdfaa79229b5020acc034cd6deeea1021c50586947b9b8e6faa83b81fbfa6133c0af5d6b07c017f7158fa94f0d206baf12dda6b68f785b773b360fd0497e16cc402d779c8d48d0fa6315536ef0660f3f4e1865f5b38ea49c7da4fd959de4e83ff3ab686f059a45c65ba2af4a6a79166aa0f496bf04d06987b6d2ea205bdb0d347718b9aeff5b61dfff344993a275b79717cd815b6ad4c0beb568c4ac9c36ff1c315ec1119a1993c4b61e6eaa0375e0aaf738ac691abd3263bf937e3";
+ assert_eq!(onion_packet_3.data, <Vec<u8>>::from_hex(hex).unwrap());
- let onion_packet_4 = super::encrypt_failure_packet(onion_keys[1].shared_secret.as_ref(), &onion_packet_3.data[..]);
- assert_eq!(onion_packet_4.data, <Vec<u8>>::from_hex("aac3200c4968f56b21f53e5e374e3a2383ad2b1b6501bbcc45abc31e59b26881b7dfadbb56ec8dae8857add94e6702fb4c3a4de22e2e669e1ed926b04447fc73034bb730f4932acd62727b75348a648a1128744657ca6a4e713b9b646c3ca66cac02cdab44dd3439890ef3aaf61708714f7375349b8da541b2548d452d84de7084bb95b3ac2345201d624d31f4d52078aa0fa05a88b4e20202bd2b86ac5b52919ea305a8949de95e935eed0319cf3cf19ebea61d76ba92532497fcdc9411d06bcd4275094d0a4a3c5d3a945e43305a5a9256e333e1f64dbca5fcd4e03a39b9012d197506e06f29339dfee3331995b21615337ae060233d39befea925cc262873e0530408e6990f1cbd233a150ef7b004ff6166c70c68d9f8c853c1abca640b8660db2921").unwrap());
+ let onion_packet_4 = super::encrypt_failure_packet(
+ onion_keys[1].shared_secret.as_ref(),
+ &onion_packet_3.data[..],
+ );
+ let hex = "aac3200c4968f56b21f53e5e374e3a2383ad2b1b6501bbcc45abc31e59b26881b7dfadbb56ec8dae8857add94e6702fb4c3a4de22e2e669e1ed926b04447fc73034bb730f4932acd62727b75348a648a1128744657ca6a4e713b9b646c3ca66cac02cdab44dd3439890ef3aaf61708714f7375349b8da541b2548d452d84de7084bb95b3ac2345201d624d31f4d52078aa0fa05a88b4e20202bd2b86ac5b52919ea305a8949de95e935eed0319cf3cf19ebea61d76ba92532497fcdc9411d06bcd4275094d0a4a3c5d3a945e43305a5a9256e333e1f64dbca5fcd4e03a39b9012d197506e06f29339dfee3331995b21615337ae060233d39befea925cc262873e0530408e6990f1cbd233a150ef7b004ff6166c70c68d9f8c853c1abca640b8660db2921";
+ assert_eq!(onion_packet_4.data, <Vec<u8>>::from_hex(hex).unwrap());
- let onion_packet_5 = super::encrypt_failure_packet(onion_keys[0].shared_secret.as_ref(), &onion_packet_4.data[..]);
- assert_eq!(onion_packet_5.data, <Vec<u8>>::from_hex("9c5add3963fc7f6ed7f148623c84134b5647e1306419dbe2174e523fa9e2fbed3a06a19f899145610741c83ad40b7712aefaddec8c6baf7325d92ea4ca4d1df8bce517f7e54554608bf2bd8071a4f52a7a2f7ffbb1413edad81eeea5785aa9d990f2865dc23b4bc3c301a94eec4eabebca66be5cf638f693ec256aec514620cc28ee4a94bd9565bc4d4962b9d3641d4278fb319ed2b84de5b665f307a2db0f7fbb757366067d88c50f7e829138fde4f78d39b5b5802f1b92a8a820865af5cc79f9f30bc3f461c66af95d13e5e1f0381c184572a91dee1c849048a647a1158cf884064deddbf1b0b88dfe2f791428d0ba0f6fb2f04e14081f69165ae66d9297c118f0907705c9c4954a199bae0bb96fad763d690e7daa6cfda59ba7f2c8d11448b604d12d").unwrap());
+ let onion_packet_5 = super::encrypt_failure_packet(
+ onion_keys[0].shared_secret.as_ref(),
+ &onion_packet_4.data[..],
+ );
+ let hex = "9c5add3963fc7f6ed7f148623c84134b5647e1306419dbe2174e523fa9e2fbed3a06a19f899145610741c83ad40b7712aefaddec8c6baf7325d92ea4ca4d1df8bce517f7e54554608bf2bd8071a4f52a7a2f7ffbb1413edad81eeea5785aa9d990f2865dc23b4bc3c301a94eec4eabebca66be5cf638f693ec256aec514620cc28ee4a94bd9565bc4d4962b9d3641d4278fb319ed2b84de5b665f307a2db0f7fbb757366067d88c50f7e829138fde4f78d39b5b5802f1b92a8a820865af5cc79f9f30bc3f461c66af95d13e5e1f0381c184572a91dee1c849048a647a1158cf884064deddbf1b0b88dfe2f791428d0ba0f6fb2f04e14081f69165ae66d9297c118f0907705c9c4954a199bae0bb96fad763d690e7daa6cfda59ba7f2c8d11448b604d12d";
+ assert_eq!(onion_packet_5.data, <Vec<u8>>::from_hex(hex).unwrap());
}
struct RawOnionHopData {
- data: Vec<u8>
+ data: Vec<u8>,
}
impl RawOnionHopData {
fn new(orig: msgs::OutboundOnionPayload) -> Self {
use crate::util::errors::APIError;
use crate::util::logger::Logger;
use crate::util::time::Time;
-#[cfg(all(not(feature = "no-std"), test))]
+#[cfg(all(feature = "std", test))]
use crate::util::time::tests::SinceEpoch;
use crate::util::ser::ReadableArgs;
}
fn mark_fulfilled(&mut self) {
- let mut session_privs = HashSet::new();
+ let mut session_privs = new_hash_set();
core::mem::swap(&mut session_privs, match self {
PendingOutboundPayment::Legacy { session_privs } |
PendingOutboundPayment::Retryable { session_privs, .. } |
fn mark_abandoned(&mut self, reason: PaymentFailureReason) {
if let PendingOutboundPayment::Retryable { session_privs, payment_hash, .. } = self {
- let mut our_session_privs = HashSet::new();
+ let mut our_session_privs = new_hash_set();
core::mem::swap(&mut our_session_privs, session_privs);
*self = PendingOutboundPayment::Abandoned {
session_privs: our_session_privs,
};
} else if let PendingOutboundPayment::InvoiceReceived { payment_hash, .. } = self {
*self = PendingOutboundPayment::Abandoned {
- session_privs: HashSet::new(),
+ session_privs: new_hash_set(),
payment_hash: *payment_hash,
reason: Some(reason)
};
/// retry, and may retry multiple failed HTLCs at once if they failed around the same time and
/// were retried along a route from a single call to [`Router::find_route_with_id`].
Attempts(u32),
- #[cfg(not(feature = "no-std"))]
+ #[cfg(feature = "std")]
/// Time elapsed before abandoning retries for a payment. At least one attempt at payment is made;
/// see [`PaymentParameters::expiry_time`] to avoid any attempt at payment after a specific time.
///
Timeout(core::time::Duration),
}
-#[cfg(feature = "no-std")]
+#[cfg(not(feature = "std"))]
impl_writeable_tlv_based_enum!(Retry,
;
(0, Attempts)
);
-#[cfg(not(feature = "no-std"))]
+#[cfg(feature = "std")]
impl_writeable_tlv_based_enum!(Retry,
;
(0, Attempts),
(Retry::Attempts(max_retry_count), PaymentAttempts { count, .. }) => {
max_retry_count > count
},
- #[cfg(all(not(feature = "no-std"), not(test)))]
+ #[cfg(all(feature = "std", not(test)))]
(Retry::Timeout(max_duration), PaymentAttempts { first_attempted_at, .. }) =>
*max_duration >= crate::util::time::MonotonicTime::now().duration_since(*first_attempted_at),
- #[cfg(all(not(feature = "no-std"), test))]
+ #[cfg(all(feature = "std", test))]
(Retry::Timeout(max_duration), PaymentAttempts { first_attempted_at, .. }) =>
*max_duration >= SinceEpoch::now().duration_since(*first_attempted_at),
}
/// it means the result of the first attempt is not known yet.
pub(crate) count: u32,
/// This field is only used when retry is `Retry::Timeout`, which is only built with feature std
- #[cfg(not(feature = "no-std"))]
+ #[cfg(feature = "std")]
first_attempted_at: T,
- #[cfg(feature = "no-std")]
+ #[cfg(not(feature = "std"))]
phantom: core::marker::PhantomData<T>,
}
-#[cfg(not(any(feature = "no-std", test)))]
-type ConfiguredTime = crate::util::time::MonotonicTime;
-#[cfg(feature = "no-std")]
+#[cfg(not(feature = "std"))]
type ConfiguredTime = crate::util::time::Eternity;
-#[cfg(all(not(feature = "no-std"), test))]
+#[cfg(all(feature = "std", not(test)))]
+type ConfiguredTime = crate::util::time::MonotonicTime;
+#[cfg(all(feature = "std", test))]
type ConfiguredTime = SinceEpoch;
impl<T: Time> PaymentAttemptsUsingTime<T> {
pub(crate) fn new() -> Self {
PaymentAttemptsUsingTime {
count: 0,
- #[cfg(not(feature = "no-std"))]
+ #[cfg(feature = "std")]
first_attempted_at: T::now(),
- #[cfg(feature = "no-std")]
+ #[cfg(not(feature = "std"))]
phantom: core::marker::PhantomData,
}
}
impl<T: Time> Display for PaymentAttemptsUsingTime<T> {
fn fmt(&self, f: &mut Formatter) -> Result<(), fmt::Error> {
- #[cfg(feature = "no-std")]
+ #[cfg(not(feature = "std"))]
return write!(f, "attempts: {}", self.count);
- #[cfg(not(feature = "no-std"))]
+ #[cfg(feature = "std")]
return write!(
f,
"attempts: {}, duration: {}s",
impl OutboundPayments {
pub(super) fn new() -> Self {
Self {
- pending_outbound_payments: Mutex::new(HashMap::new()),
+ pending_outbound_payments: Mutex::new(new_hash_map()),
retry_lock: Mutex::new(()),
}
}
retry_strategy,
attempts: PaymentAttempts::new(),
payment_params,
- session_privs: HashSet::new(),
+ session_privs: new_hash_set(),
pending_amt_msat: 0,
pending_fee_msat: Some(0),
payment_hash,
let logger = test_utils::TestLogger::new();
let network_graph = Arc::new(NetworkGraph::new(Network::Testnet, &logger));
let scorer = RwLock::new(test_utils::TestScorer::new());
- let router = test_utils::TestRouter::new(network_graph, &scorer);
+ let router = test_utils::TestRouter::new(network_graph, &logger, &scorer);
let secp_ctx = Secp256k1::new();
let keys_manager = test_utils::TestKeysInterface::new(&[0; 32], Network::Testnet);
let logger = test_utils::TestLogger::new();
let network_graph = Arc::new(NetworkGraph::new(Network::Testnet, &logger));
let scorer = RwLock::new(test_utils::TestScorer::new());
- let router = test_utils::TestRouter::new(network_graph, &scorer);
+ let router = test_utils::TestRouter::new(network_graph, &logger, &scorer);
let secp_ctx = Secp256k1::new();
let keys_manager = test_utils::TestKeysInterface::new(&[0; 32], Network::Testnet);
let logger = test_utils::TestLogger::new();
let network_graph = Arc::new(NetworkGraph::new(Network::Testnet, &logger));
let scorer = RwLock::new(test_utils::TestScorer::new());
- let router = test_utils::TestRouter::new(network_graph, &scorer);
+ let router = test_utils::TestRouter::new(network_graph, &logger, &scorer);
let secp_ctx = Secp256k1::new();
let keys_manager = test_utils::TestKeysInterface::new(&[0; 32], Network::Testnet);
let logger = test_utils::TestLogger::new();
let network_graph = Arc::new(NetworkGraph::new(Network::Testnet, &logger));
let scorer = RwLock::new(test_utils::TestScorer::new());
- let router = test_utils::TestRouter::new(network_graph, &scorer);
+ let router = test_utils::TestRouter::new(network_graph, &logger, &scorer);
let keys_manager = test_utils::TestKeysInterface::new(&[0; 32], Network::Testnet);
let pending_events = Mutex::new(VecDeque::new());
let logger = test_utils::TestLogger::new();
let network_graph = Arc::new(NetworkGraph::new(Network::Testnet, &logger));
let scorer = RwLock::new(test_utils::TestScorer::new());
- let router = test_utils::TestRouter::new(network_graph, &scorer);
+ let router = test_utils::TestRouter::new(network_graph, &logger, &scorer);
let keys_manager = test_utils::TestKeysInterface::new(&[0; 32], Network::Testnet);
let pending_events = Mutex::new(VecDeque::new());
assert!(pending_events.lock().unwrap().is_empty());
}
- #[test]
- fn fails_paying_for_bolt12_invoice() {
- let logger = test_utils::TestLogger::new();
- let network_graph = Arc::new(NetworkGraph::new(Network::Testnet, &logger));
- let scorer = RwLock::new(test_utils::TestScorer::new());
- let router = test_utils::TestRouter::new(network_graph, &scorer);
- let keys_manager = test_utils::TestKeysInterface::new(&[0; 32], Network::Testnet);
-
- let pending_events = Mutex::new(VecDeque::new());
- let outbound_payments = OutboundPayments::new();
- let payment_id = PaymentId([0; 32]);
- let expiration = StaleExpiration::AbsoluteTimeout(Duration::from_secs(100));
-
- let invoice = OfferBuilder::new("foo".into(), recipient_pubkey())
- .amount_msats(1000)
- .build().unwrap()
- .request_invoice(vec![1; 32], payer_pubkey()).unwrap()
- .build().unwrap()
- .sign(payer_sign).unwrap()
- .respond_with_no_std(payment_paths(), payment_hash(), now()).unwrap()
- .build().unwrap()
- .sign(recipient_sign).unwrap();
-
- assert!(
- outbound_payments.add_new_awaiting_invoice(
- payment_id, expiration, Retry::Attempts(0),
- Some(invoice.amount_msats() / 100 + 50_000)
- ).is_ok()
- );
- assert!(outbound_payments.has_pending_payments());
-
- let route_params = RouteParameters::from_payment_params_and_value(
- PaymentParameters::from_bolt12_invoice(&invoice),
- invoice.amount_msats(),
- );
- router.expect_find_route(
- route_params.clone(), Ok(Route { paths: vec![], route_params: Some(route_params) })
- );
-
- assert_eq!(
- outbound_payments.send_payment_for_bolt12_invoice(
- &invoice, payment_id, &&router, vec![], || InFlightHtlcs::new(), &&keys_manager,
- &&keys_manager, 0, &&logger, &pending_events, |_| panic!()
- ),
- Ok(()),
- );
- assert!(!outbound_payments.has_pending_payments());
-
- let payment_hash = invoice.payment_hash();
- let reason = Some(PaymentFailureReason::UnexpectedError);
-
- assert!(!pending_events.lock().unwrap().is_empty());
- assert_eq!(
- pending_events.lock().unwrap().pop_front(),
- Some((Event::PaymentFailed { payment_id, payment_hash, reason }, None)),
- );
- assert!(pending_events.lock().unwrap().is_empty());
- }
-
#[test]
fn sends_payment_for_bolt12_invoice() {
let logger = test_utils::TestLogger::new();
let network_graph = Arc::new(NetworkGraph::new(Network::Testnet, &logger));
let scorer = RwLock::new(test_utils::TestScorer::new());
- let router = test_utils::TestRouter::new(network_graph, &scorer);
+ let router = test_utils::TestRouter::new(network_graph, &logger, &scorer);
let keys_manager = test_utils::TestKeysInterface::new(&[0; 32], Network::Testnet);
let pending_events = Mutex::new(VecDeque::new());
use crate::ln::functional_test_utils;
use crate::ln::functional_test_utils::*;
use crate::routing::gossip::NodeId;
+
#[cfg(feature = "std")]
-use std::time::{SystemTime, Instant, Duration};
-#[cfg(not(feature = "no-std"))]
-use crate::util::time::tests::SinceEpoch;
+use {
+ crate::util::time::tests::SinceEpoch,
+ std::time::{SystemTime, Instant, Duration},
+};
#[test]
fn mpp_failure() {
// Can't use claim_payment_along_route as it doesn't support overpayment, so we break out the
// individual steps here.
+ nodes[3].node.claim_funds(payment_preimage);
let extra_fees = vec![0, total_overpaid_amount];
- let expected_total_fee_msat = do_claim_payment_along_route_with_extra_penultimate_hop_fees(
- &nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], &extra_fees[..], false,
- payment_preimage);
+ let expected_route = &[&[&nodes[1], &nodes[3]][..], &[&nodes[2], &nodes[3]][..]];
+ let args = ClaimAlongRouteArgs::new(&nodes[0], &expected_route[..], payment_preimage)
+ .with_expected_min_htlc_overpay(extra_fees);
+ let expected_total_fee_msat = pass_claimed_payment_along_route(args);
expect_payment_sent!(&nodes[0], payment_preimage, Some(expected_total_fee_msat));
}
assert_eq!(skimmed_fee_msat * num_mpp_parts as u64, counterparty_skimmed_fee_msat);
assert_eq!(nodes[2].node.get_our_node_id(), receiver_node_id.unwrap());
match purpose {
- crate::events::PaymentPurpose::InvoicePayment { payment_preimage: ev_payment_preimage,
- payment_secret: ev_payment_secret, .. } =>
- {
+ crate::events::PaymentPurpose::Bolt11InvoicePayment {
+ payment_preimage: ev_payment_preimage,
+ payment_secret: ev_payment_secret,
+ ..
+ } => {
assert_eq!(payment_preimage, ev_payment_preimage.unwrap());
assert_eq!(payment_secret, *ev_payment_secret);
},
let mut expected_paths = Vec::new();
for _ in 0..num_mpp_parts { expected_paths_vecs.push(vec!(&nodes[1], &nodes[2])); }
for i in 0..num_mpp_parts { expected_paths.push(&expected_paths_vecs[i][..]); }
- let total_fee_msat = do_claim_payment_along_route_with_extra_penultimate_hop_fees(
- &nodes[0], &expected_paths[..], &vec![skimmed_fee_msat as u32; num_mpp_parts][..], false,
- payment_preimage);
+ expected_paths[0].last().unwrap().node.claim_funds(payment_preimage);
+ let args = ClaimAlongRouteArgs::new(&nodes[0], &expected_paths[..], payment_preimage)
+ .with_expected_extra_fees(vec![skimmed_fee_msat as u32; num_mpp_parts]);
+ let total_fee_msat = pass_claimed_payment_along_route(args);
// The sender doesn't know that the penultimate hop took an extra fee.
expect_payment_sent(&nodes[0], payment_preimage,
Some(Some(total_fee_msat - skimmed_fee_msat * num_mpp_parts as u64)), true, true);
let mut msg_events = nodes[0].node.get_and_clear_pending_msg_events();
assert_eq!(msg_events.len(), 0);
} else if test == AutoRetry::FailTimeout {
- #[cfg(not(feature = "no-std"))] {
+ #[cfg(feature = "std")] {
// Ensure ChannelManager will not retry a payment if it times out due to Retry::Timeout.
nodes[0].node.send_payment(payment_hash, RecipientOnionFields::secret_only(payment_secret),
PaymentId(payment_hash.0), route_params, Retry::Timeout(Duration::from_secs(60))).unwrap();
match (known_tlvs, even_tlvs) {
(true, _) => {
nodes[1].node.claim_funds_with_known_custom_tlvs(our_payment_preimage);
- let expected_total_fee_msat = pass_claimed_payment_along_route(&nodes[0], &[&[&nodes[1]]], &[0; 1], false, our_payment_preimage);
+ let expected_total_fee_msat = pass_claimed_payment_along_route(ClaimAlongRouteArgs::new(&nodes[0], &[&[&nodes[1]]], our_payment_preimage));
expect_payment_sent!(&nodes[0], our_payment_preimage, Some(expected_total_fee_msat));
},
(false, false) => {
check_added_monitors!(nodes[0], 1);
let mut events = nodes[0].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 1);
- let payment_claimable = pass_along_path(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000,
- payment_hash, Some(payment_secret), events.pop().unwrap(), true, None).unwrap();
- match payment_claimable {
- Event::PaymentClaimable { onion_fields, .. } => {
- assert_eq!(&onion_fields.unwrap().custom_tlvs()[..], &custom_tlvs[..]);
- },
- _ => panic!("Unexpected event"),
- };
+ let path = &[&nodes[1], &nodes[2]];
+ let args = PassAlongPathArgs::new(&nodes[0], path, 1_000_000, payment_hash, events.pop().unwrap())
+ .with_payment_secret(payment_secret)
+ .with_custom_tlvs(custom_tlvs);
+ do_pass_along_path(args);
claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], false, payment_preimage);
}
use crate::ln::features::{InitFeatures, NodeFeatures};
use crate::ln::msgs;
use crate::ln::msgs::{ChannelMessageHandler, LightningError, SocketAddress, OnionMessageHandler, RoutingMessageHandler};
-#[cfg(not(c_bindings))]
-use crate::ln::channelmanager::{SimpleArcChannelManager, SimpleRefChannelManager};
use crate::util::ser::{VecWriter, Writeable, Writer};
use crate::ln::peer_channel_encryptor::{PeerChannelEncryptor, NextNoiseStep, MessageBuf, MSG_BUF_ALLOC_SIZE};
use crate::ln::wire;
use crate::ln::wire::{Encode, Type};
-#[cfg(not(c_bindings))]
-use crate::onion_message::messenger::{SimpleArcOnionMessenger, SimpleRefOnionMessenger};
use crate::onion_message::messenger::{CustomOnionMessageHandler, PendingOnionMessage};
use crate::onion_message::offers::{OffersMessage, OffersMessageHandler};
use crate::onion_message::packet::OnionMessageContents;
use crate::routing::gossip::{NodeId, NodeAlias};
use crate::util::atomic_counter::AtomicCounter;
-use crate::util::logger::{Logger, WithContext};
+use crate::util::logger::{Level, Logger, WithContext};
use crate::util::string::PrintableString;
+#[allow(unused_imports)]
use crate::prelude::*;
+
use crate::io;
-use alloc::collections::VecDeque;
use crate::sync::{Mutex, MutexGuard, FairRwLock};
use core::sync::atomic::{AtomicBool, AtomicU32, AtomicI32, Ordering};
use core::{cmp, hash, fmt, mem};
use std::error;
#[cfg(not(c_bindings))]
use {
+ crate::ln::channelmanager::{SimpleArcChannelManager, SimpleRefChannelManager},
+ crate::onion_message::messenger::{SimpleArcOnionMessenger, SimpleRefOnionMessenger},
crate::routing::gossip::{NetworkGraph, P2PGossipSync},
crate::sign::KeysManager,
crate::sync::Arc,
fn handle_query_short_channel_ids(&self, _their_node_id: &PublicKey, _msg: msgs::QueryShortChannelIds) -> Result<(), LightningError> { Ok(()) }
fn provided_node_features(&self) -> NodeFeatures { NodeFeatures::empty() }
fn provided_init_features(&self, _their_node_id: &PublicKey) -> InitFeatures {
- InitFeatures::empty()
+ let mut features = InitFeatures::empty();
+ features.set_gossip_queries_optional();
+ features
}
fn processing_queue_high(&self) -> bool { false }
}
// Any messages which are related to a specific channel generate an error message to let the
// peer know we don't care about channels.
fn handle_open_channel(&self, their_node_id: &PublicKey, msg: &msgs::OpenChannel) {
- ErroringMessageHandler::push_error(self, their_node_id, msg.temporary_channel_id);
+ ErroringMessageHandler::push_error(self, their_node_id, msg.common_fields.temporary_channel_id);
}
fn handle_accept_channel(&self, their_node_id: &PublicKey, msg: &msgs::AcceptChannel) {
- ErroringMessageHandler::push_error(self, their_node_id, msg.temporary_channel_id);
+ ErroringMessageHandler::push_error(self, their_node_id, msg.common_fields.temporary_channel_id);
}
fn handle_funding_created(&self, their_node_id: &PublicKey, msg: &msgs::FundingCreated) {
ErroringMessageHandler::push_error(self, their_node_id, msg.temporary_channel_id);
fn handle_stfu(&self, their_node_id: &PublicKey, msg: &msgs::Stfu) {
ErroringMessageHandler::push_error(&self, their_node_id, msg.channel_id);
}
+ #[cfg(splicing)]
fn handle_splice(&self, their_node_id: &PublicKey, msg: &msgs::Splice) {
ErroringMessageHandler::push_error(&self, their_node_id, msg.channel_id);
}
+ #[cfg(splicing)]
fn handle_splice_ack(&self, their_node_id: &PublicKey, msg: &msgs::SpliceAck) {
ErroringMessageHandler::push_error(&self, their_node_id, msg.channel_id);
}
+ #[cfg(splicing)]
fn handle_splice_locked(&self, their_node_id: &PublicKey, msg: &msgs::SpliceLocked) {
ErroringMessageHandler::push_error(&self, their_node_id, msg.channel_id);
}
}
fn handle_open_channel_v2(&self, their_node_id: &PublicKey, msg: &msgs::OpenChannelV2) {
- ErroringMessageHandler::push_error(self, their_node_id, msg.temporary_channel_id);
+ ErroringMessageHandler::push_error(self, their_node_id, msg.common_fields.temporary_channel_id);
}
fn handle_accept_channel_v2(&self, their_node_id: &PublicKey, msg: &msgs::AcceptChannelV2) {
- ErroringMessageHandler::push_error(self, their_node_id, msg.temporary_channel_id);
+ ErroringMessageHandler::push_error(self, their_node_id, msg.common_fields.temporary_channel_id);
}
fn handle_tx_add_input(&self, their_node_id: &PublicKey, msg: &msgs::TxAddInput) {
fn disconnect_socket(&mut self);
}
+/// Details of a connected peer as returned by [`PeerManager::list_peers`].
+pub struct PeerDetails {
+ /// The node id of the peer.
+ ///
+ /// For outbound connections, this [`PublicKey`] will be the same as the `their_node_id` parameter
+ /// passed in to [`PeerManager::new_outbound_connection`].
+ pub counterparty_node_id: PublicKey,
+ /// The socket address the peer provided in the initial handshake.
+ ///
+ /// Will only be `Some` if an address had been previously provided to
+ /// [`PeerManager::new_outbound_connection`] or [`PeerManager::new_inbound_connection`].
+ pub socket_address: Option<SocketAddress>,
+ /// The features the peer provided in the initial handshake.
+ pub init_features: InitFeatures,
+ /// Indicates the direction of the peer connection.
+ ///
+ /// Will be `true` for inbound connections, and `false` for outbound connections.
+ pub is_inbound_connection: bool,
+}
+
/// Error for PeerManager errors. If you get one of these, you must disconnect the socket and
/// generate no further read_event/write_buffer_space_avail/socket_disconnected calls for the
/// descriptor.
PeerManager {
message_handler,
- peers: FairRwLock::new(HashMap::new()),
- node_id_to_descriptor: Mutex::new(HashMap::new()),
+ peers: FairRwLock::new(new_hash_map()),
+ node_id_to_descriptor: Mutex::new(new_hash_map()),
event_processing_state: AtomicI32::new(0),
ephemeral_key_midstate,
peer_counter: AtomicCounter::new(),
}
}
- /// Get a list of tuples mapping from node id to network addresses for peers which have
- /// completed the initial handshake.
- ///
- /// For outbound connections, the [`PublicKey`] will be the same as the `their_node_id` parameter
- /// passed in to [`Self::new_outbound_connection`], however entries will only appear once the initial
- /// handshake has completed and we are sure the remote peer has the private key for the given
- /// [`PublicKey`].
- ///
- /// The returned `Option`s will only be `Some` if an address had been previously given via
- /// [`Self::new_outbound_connection`] or [`Self::new_inbound_connection`].
- pub fn get_peer_node_ids(&self) -> Vec<(PublicKey, Option<SocketAddress>)> {
+ /// Returns a list of [`PeerDetails`] for connected peers that have completed the initial
+ /// handshake.
+ pub fn list_peers(&self) -> Vec<PeerDetails> {
let peers = self.peers.read().unwrap();
peers.values().filter_map(|peer_mutex| {
let p = peer_mutex.lock().unwrap();
if !p.handshake_complete() {
return None;
}
- Some((p.their_node_id.unwrap().0, p.their_socket_address.clone()))
+ let details = PeerDetails {
+ // unwrap safety: their_node_id is guaranteed to be `Some` after the handshake
+ // completed.
+ counterparty_node_id: p.their_node_id.unwrap().0,
+ socket_address: p.their_socket_address.clone(),
+ // unwrap safety: their_features is guaranteed to be `Some` after the handshake
+ // completed.
+ init_features: p.their_features.clone().unwrap(),
+ is_inbound_connection: p.inbound_connection,
+ };
+ Some(details)
}).collect()
}
+ /// Returns the [`PeerDetails`] of a connected peer that has completed the initial handshake.
+ ///
+ /// Will return `None` if the peer is unknown or it hasn't completed the initial handshake.
+ pub fn peer_by_node_id(&self, their_node_id: &PublicKey) -> Option<PeerDetails> {
+ let peers = self.peers.read().unwrap();
+ peers.values().find_map(|peer_mutex| {
+ let p = peer_mutex.lock().unwrap();
+ if !p.handshake_complete() {
+ return None;
+ }
+
+ // unwrap safety: their_node_id is guaranteed to be `Some` after the handshake
+ // completed.
+ let counterparty_node_id = p.their_node_id.unwrap().0;
+
+ if counterparty_node_id != *their_node_id {
+ return None;
+ }
+
+ let details = PeerDetails {
+ counterparty_node_id,
+ socket_address: p.their_socket_address.clone(),
+ // unwrap safety: their_features is guaranteed to be `Some` after the handshake
+ // completed.
+ init_features: p.their_features.clone().unwrap(),
+ is_inbound_connection: p.inbound_connection,
+ };
+ Some(details)
+ })
+ }
+
fn get_ephemeral_key(&self) -> SecretKey {
let mut ephemeral_hash = self.ephemeral_key_midstate.clone();
let counter = self.peer_counter.get_increment();
return Err(PeerHandleError { });
},
msgs::ErrorAction::IgnoreAndLog(level) => {
- log_given_level!(logger, level, "Error handling message{}; ignoring: {}", OptionalFromDebugger(&peer_node_id), e.err);
+ log_given_level!(logger, level, "Error handling {}message{}; ignoring: {}",
+ if level == Level::Gossip { "gossip " } else { "" },
+ OptionalFromDebugger(&peer_node_id), e.err);
continue
},
msgs::ErrorAction::IgnoreDuplicateGossip => continue, // Don't even bother logging these
let networks = self.message_handler.chan_handler.get_chain_hashes();
let resp = msgs::Init { features, networks, remote_network_address: filter_addresses(peer.their_socket_address.clone()) };
self.enqueue_message(peer, &resp);
- peer.awaiting_pong_timer_tick_intervals = 0;
},
NextNoiseStep::ActThree => {
let their_node_id = try_potential_handleerror!(peer,
let networks = self.message_handler.chan_handler.get_chain_hashes();
let resp = msgs::Init { features, networks, remote_network_address: filter_addresses(peer.their_socket_address.clone()) };
self.enqueue_message(peer, &resp);
- peer.awaiting_pong_timer_tick_intervals = 0;
},
NextNoiseStep::NoiseComplete => {
if peer.pending_read_is_header {
}
(msgs::DecodeError::BadLengthDescriptor, _) => return Err(PeerHandleError { }),
(msgs::DecodeError::Io(_), _) => return Err(PeerHandleError { }),
+ (msgs::DecodeError::DangerousValue, _) => return Err(PeerHandleError { }),
}
}
};
}
/// Process an incoming message and return a decision (ok, lightning error, peer handling error) regarding the next action with the peer
+ ///
/// Returns the message back if it needs to be broadcast to all other peers.
fn handle_message(
&self,
peer_mutex: &Mutex<Peer>,
- mut peer_lock: MutexGuard<Peer>,
- message: wire::Message<<<CMH as core::ops::Deref>::Target as wire::CustomMessageReader>::CustomMessage>
- ) -> Result<Option<wire::Message<<<CMH as core::ops::Deref>::Target as wire::CustomMessageReader>::CustomMessage>>, MessageHandlingError> {
+ peer_lock: MutexGuard<Peer>,
+ message: wire::Message<<<CMH as Deref>::Target as wire::CustomMessageReader>::CustomMessage>
+ ) -> Result<Option<wire::Message<<<CMH as Deref>::Target as wire::CustomMessageReader>::CustomMessage>>, MessageHandlingError> {
let their_node_id = peer_lock.their_node_id.clone().expect("We know the peer's public key by the time we receive messages").0;
let logger = WithContext::from(&self.logger, Some(their_node_id), None);
+
+ let message = match self.do_handle_message_holding_peer_lock(peer_lock, message, &their_node_id, &logger)? {
+ Some(processed_message) => processed_message,
+ None => return Ok(None),
+ };
+
+ self.do_handle_message_without_peer_lock(peer_mutex, message, &their_node_id, &logger)
+ }
+
+ // Conducts all message processing that requires us to hold the `peer_lock`.
+ //
+ // Returns `None` if the message was fully processed and otherwise returns the message back to
+ // allow it to be subsequently processed by `do_handle_message_without_peer_lock`.
+ fn do_handle_message_holding_peer_lock<'a>(
+ &self,
+ mut peer_lock: MutexGuard<Peer>,
+ message: wire::Message<<<CMH as Deref>::Target as wire::CustomMessageReader>::CustomMessage>,
+ their_node_id: &PublicKey,
+ logger: &WithContext<'a, L>
+ ) -> Result<Option<wire::Message<<<CMH as Deref>::Target as wire::CustomMessageReader>::CustomMessage>>, MessageHandlingError>
+ {
peer_lock.received_message_since_timer_tick = true;
// Need an Init as first message
return Err(PeerHandleError { }.into());
}
+ peer_lock.awaiting_pong_timer_tick_intervals = 0;
peer_lock.their_features = Some(msg.features);
return Ok(None);
} else if peer_lock.their_features.is_none() {
peer_lock.received_channel_announce_since_backlogged = true;
}
- mem::drop(peer_lock);
+ Ok(Some(message))
+ }
+ // Conducts all message processing that doesn't require us to hold the `peer_lock`.
+ //
// Returns the message back if it needs to be broadcast to all other peers.
+ fn do_handle_message_without_peer_lock<'a>(
+ &self,
+ peer_mutex: &Mutex<Peer>,
+ message: wire::Message<<<CMH as Deref>::Target as wire::CustomMessageReader>::CustomMessage>,
+ their_node_id: &PublicKey,
+ logger: &WithContext<'a, L>
+ ) -> Result<Option<wire::Message<<<CMH as Deref>::Target as wire::CustomMessageReader>::CustomMessage>>, MessageHandlingError>
+ {
if is_gossip_msg(message.type_id()) {
log_gossip!(logger, "Received message {:?} from {}", message, log_pubkey!(their_node_id));
} else {
self.message_handler.chan_handler.handle_stfu(&their_node_id, &msg);
}
+ #[cfg(splicing)]
// Splicing messages:
wire::Message::Splice(msg) => {
self.message_handler.chan_handler.handle_splice(&their_node_id, &msg);
}
+ #[cfg(splicing)]
wire::Message::SpliceAck(msg) => {
self.message_handler.chan_handler.handle_splice_ack(&their_node_id, &msg);
}
+ #[cfg(splicing)]
wire::Message::SpliceLocked(msg) => {
self.message_handler.chan_handler.handle_splice_locked(&their_node_id, &msg);
}
Ok(should_forward)
}
- fn forward_broadcast_msg(&self, peers: &HashMap<Descriptor, Mutex<Peer>>, msg: &wire::Message<<<CMH as core::ops::Deref>::Target as wire::CustomMessageReader>::CustomMessage>, except_node: Option<&PublicKey>) {
+ fn forward_broadcast_msg(&self, peers: &HashMap<Descriptor, Mutex<Peer>>, msg: &wire::Message<<<CMH as Deref>::Target as wire::CustomMessageReader>::CustomMessage>, except_node: Option<&PublicKey>) {
match msg {
wire::Message::ChannelAnnouncement(ref msg) => {
log_gossip!(self.logger, "Sending message to all peers except {:?} or the announced channel's counterparties: {:?}", except_node, msg);
self.update_gossip_backlogged();
let flush_read_disabled = self.gossip_processing_backlog_lifted.swap(false, Ordering::Relaxed);
- let mut peers_to_disconnect = HashMap::new();
+ let mut peers_to_disconnect = new_hash_map();
{
let peers_lock = self.peers.read().unwrap();
for event in events_generated.drain(..) {
match event {
MessageSendEvent::SendAcceptChannel { ref node_id, ref msg } => {
- log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.temporary_channel_id)), "Handling SendAcceptChannel event in peer_handler for node {} for channel {}",
+ log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.common_fields.temporary_channel_id)), "Handling SendAcceptChannel event in peer_handler for node {} for channel {}",
log_pubkey!(node_id),
- &msg.temporary_channel_id);
+ &msg.common_fields.temporary_channel_id);
self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
},
MessageSendEvent::SendAcceptChannelV2 { ref node_id, ref msg } => {
- log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.temporary_channel_id)), "Handling SendAcceptChannelV2 event in peer_handler for node {} for channel {}",
+ log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.common_fields.temporary_channel_id)), "Handling SendAcceptChannelV2 event in peer_handler for node {} for channel {}",
log_pubkey!(node_id),
- &msg.temporary_channel_id);
+ &msg.common_fields.temporary_channel_id);
self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
},
MessageSendEvent::SendOpenChannel { ref node_id, ref msg } => {
- log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.temporary_channel_id)), "Handling SendOpenChannel event in peer_handler for node {} for channel {}",
+ log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.common_fields.temporary_channel_id)), "Handling SendOpenChannel event in peer_handler for node {} for channel {}",
log_pubkey!(node_id),
- &msg.temporary_channel_id);
+ &msg.common_fields.temporary_channel_id);
self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
},
MessageSendEvent::SendOpenChannelV2 { ref node_id, ref msg } => {
- log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.temporary_channel_id)), "Handling SendOpenChannelV2 event in peer_handler for node {} for channel {}",
+ log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.common_fields.temporary_channel_id)), "Handling SendOpenChannelV2 event in peer_handler for node {} for channel {}",
log_pubkey!(node_id),
- &msg.temporary_channel_id);
+ &msg.common_fields.temporary_channel_id);
self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
},
MessageSendEvent::SendFundingCreated { ref node_id, ref msg } => {
log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.temporary_channel_id)), "Handling SendFundingCreated event in peer_handler for node {} for channel {} (which becomes {})",
log_pubkey!(node_id),
&msg.temporary_channel_id,
- log_funding_channel_id!(msg.funding_txid, msg.funding_output_index));
+ ChannelId::v1_from_funding_txid(msg.funding_txid.as_byte_array(), msg.funding_output_index));
// TODO: If the peer is gone we should generate a DiscardFunding event
// indicating to the wallet that they should just throw away this funding transaction
self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
// We do not have the peers write lock, so we just store that we're
// about to disconnect the peer and do it after we finish
// processing most messages.
- let msg = msg.map(|msg| wire::Message::<<<CMH as core::ops::Deref>::Target as wire::CustomMessageReader>::CustomMessage>::Error(msg));
+ let msg = msg.map(|msg| wire::Message::<<<CMH as Deref>::Target as wire::CustomMessageReader>::CustomMessage>::Error(msg));
peers_to_disconnect.insert(node_id, msg);
},
msgs::ErrorAction::DisconnectPeerWithWarning { msg } => {
use crate::ln::ChannelId;
use crate::ln::features::{InitFeatures, NodeFeatures};
use crate::ln::peer_channel_encryptor::PeerChannelEncryptor;
- use crate::ln::peer_handler::{CustomMessageHandler, PeerManager, MessageHandler, SocketDescriptor, IgnoringMessageHandler, filter_addresses};
+ use crate::ln::peer_handler::{CustomMessageHandler, PeerManager, MessageHandler, SocketDescriptor, IgnoringMessageHandler, filter_addresses, ErroringMessageHandler, MAX_BUFFER_DRAIN_TICK_INTERVALS_PER_PEER};
use crate::ln::{msgs, wire};
use crate::ln::msgs::{LightningError, SocketAddress};
use crate::util::test_utils;
use bitcoin::blockdata::constants::ChainHash;
use bitcoin::secp256k1::{PublicKey, SecretKey};
- use crate::prelude::*;
use crate::sync::{Arc, Mutex};
use core::convert::Infallible;
use core::sync::atomic::{AtomicBool, Ordering};
+ #[allow(unused_imports)]
+ use crate::prelude::*;
+
#[derive(Clone)]
struct FileDescriptor {
fd: u16,
};
let addr_a = SocketAddress::TcpIpV4{addr: [127, 0, 0, 1], port: 1000};
let id_b = peer_b.node_signer.get_node_id(Recipient::Node).unwrap();
+ let features_a = peer_a.init_features(&id_b);
+ let features_b = peer_b.init_features(&id_a);
let mut fd_b = FileDescriptor {
fd: 1, outbound_data: Arc::new(Mutex::new(Vec::new())),
disconnect: Arc::new(AtomicBool::new(false)),
let a_data = fd_a.outbound_data.lock().unwrap().split_off(0);
assert_eq!(peer_b.read_event(&mut fd_b, &a_data).unwrap(), false);
- assert!(peer_a.get_peer_node_ids().contains(&(id_b, Some(addr_b))));
- assert!(peer_b.get_peer_node_ids().contains(&(id_a, Some(addr_a))));
-
+ assert_eq!(peer_a.peer_by_node_id(&id_b).unwrap().counterparty_node_id, id_b);
+ assert_eq!(peer_a.peer_by_node_id(&id_b).unwrap().socket_address, Some(addr_b));
+ assert_eq!(peer_a.peer_by_node_id(&id_b).unwrap().init_features, features_b);
+ assert_eq!(peer_b.peer_by_node_id(&id_a).unwrap().counterparty_node_id, id_a);
+ assert_eq!(peer_b.peer_by_node_id(&id_a).unwrap().socket_address, Some(addr_a));
+ assert_eq!(peer_b.peer_by_node_id(&id_a).unwrap().init_features, features_a);
(fd_a.clone(), fd_b.clone())
}
assert!(peers[0].read_event(&mut fd_a, &b_data).is_err());
}
+ #[test]
+ fn test_inbound_conn_handshake_complete_awaiting_pong() {
+ // Test that we do not disconnect an outbound peer after the noise handshake completes due
+ // to a pong timeout for a ping that was never sent if a timer tick fires after we send act
+ // two of the noise handshake along with our init message but before we receive their init
+ // message.
+ let logger = test_utils::TestLogger::new();
+ let node_signer_a = test_utils::TestNodeSigner::new(SecretKey::from_slice(&[42; 32]).unwrap());
+ let node_signer_b = test_utils::TestNodeSigner::new(SecretKey::from_slice(&[43; 32]).unwrap());
+ let peer_a = PeerManager::new(MessageHandler {
+ chan_handler: ErroringMessageHandler::new(),
+ route_handler: IgnoringMessageHandler {},
+ onion_message_handler: IgnoringMessageHandler {},
+ custom_message_handler: IgnoringMessageHandler {},
+ }, 0, &[0; 32], &logger, &node_signer_a);
+ let peer_b = PeerManager::new(MessageHandler {
+ chan_handler: ErroringMessageHandler::new(),
+ route_handler: IgnoringMessageHandler {},
+ onion_message_handler: IgnoringMessageHandler {},
+ custom_message_handler: IgnoringMessageHandler {},
+ }, 0, &[1; 32], &logger, &node_signer_b);
+
+ let a_id = node_signer_a.get_node_id(Recipient::Node).unwrap();
+ let mut fd_a = FileDescriptor {
+ fd: 1, outbound_data: Arc::new(Mutex::new(Vec::new())),
+ disconnect: Arc::new(AtomicBool::new(false)),
+ };
+ let mut fd_b = FileDescriptor {
+ fd: 1, outbound_data: Arc::new(Mutex::new(Vec::new())),
+ disconnect: Arc::new(AtomicBool::new(false)),
+ };
+
+ // Exchange messages with both peers until they both complete the init handshake.
+ let act_one = peer_b.new_outbound_connection(a_id, fd_b.clone(), None).unwrap();
+ peer_a.new_inbound_connection(fd_a.clone(), None).unwrap();
+
+ assert_eq!(peer_a.read_event(&mut fd_a, &act_one).unwrap(), false);
+ peer_a.process_events();
+
+ let act_two = fd_a.outbound_data.lock().unwrap().split_off(0);
+ assert_eq!(peer_b.read_event(&mut fd_b, &act_two).unwrap(), false);
+ peer_b.process_events();
+
+ // Calling this here triggers the race on inbound connections.
+ peer_b.timer_tick_occurred();
+
+ let act_three_with_init_b = fd_b.outbound_data.lock().unwrap().split_off(0);
+ assert!(!peer_a.peers.read().unwrap().get(&fd_a).unwrap().lock().unwrap().handshake_complete());
+ assert_eq!(peer_a.read_event(&mut fd_a, &act_three_with_init_b).unwrap(), false);
+ peer_a.process_events();
+ assert!(peer_a.peers.read().unwrap().get(&fd_a).unwrap().lock().unwrap().handshake_complete());
+
+ let init_a = fd_a.outbound_data.lock().unwrap().split_off(0);
+ assert!(!init_a.is_empty());
+
+ assert!(!peer_b.peers.read().unwrap().get(&fd_b).unwrap().lock().unwrap().handshake_complete());
+ assert_eq!(peer_b.read_event(&mut fd_b, &init_a).unwrap(), false);
+ peer_b.process_events();
+ assert!(peer_b.peers.read().unwrap().get(&fd_b).unwrap().lock().unwrap().handshake_complete());
+
+ // Make sure we're still connected.
+ assert_eq!(peer_b.peers.read().unwrap().len(), 1);
+
+ // B should send a ping on the first timer tick after `handshake_complete`.
+ assert!(fd_b.outbound_data.lock().unwrap().split_off(0).is_empty());
+ peer_b.timer_tick_occurred();
+ peer_b.process_events();
+ assert!(!fd_b.outbound_data.lock().unwrap().split_off(0).is_empty());
+
+ let mut send_warning = || {
+ {
+ let peers = peer_a.peers.read().unwrap();
+ let mut peer_b = peers.get(&fd_a).unwrap().lock().unwrap();
+ peer_a.enqueue_message(&mut peer_b, &msgs::WarningMessage {
+ channel_id: ChannelId([0; 32]),
+ data: "no disconnect plz".to_string(),
+ });
+ }
+ peer_a.process_events();
+ let msg = fd_a.outbound_data.lock().unwrap().split_off(0);
+ assert!(!msg.is_empty());
+ assert_eq!(peer_b.read_event(&mut fd_b, &msg).unwrap(), false);
+ peer_b.process_events();
+ };
+
+ // Fire more ticks until we reach the pong timeout. We send any message except pong to
+ // pretend the connection is still alive.
+ send_warning();
+ for _ in 0..MAX_BUFFER_DRAIN_TICK_INTERVALS_PER_PEER {
+ peer_b.timer_tick_occurred();
+ send_warning();
+ }
+ assert_eq!(peer_b.peers.read().unwrap().len(), 1);
+
+ // One more tick should enforce the pong timeout.
+ peer_b.timer_tick_occurred();
+ assert_eq!(peer_b.peers.read().unwrap().len(), 0);
+ }
+
#[test]
fn test_filter_addresses(){
// Tests the filter_addresses function.
use crate::routing::gossip::RoutingFees;
use crate::routing::router::{PaymentParameters, RouteHint, RouteHintHop};
use crate::ln::features::ChannelTypeFeatures;
-use crate::ln::msgs;
+use crate::ln::{msgs, ChannelId};
use crate::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, ChannelUpdate, ErrorAction};
use crate::ln::wire::Encode;
use crate::util::config::{UserConfig, MaxDustHTLCExposure};
use crate::util::test_utils;
use crate::prelude::*;
-use core::default::Default;
use crate::ln::functional_test_utils::*;
nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None, Some(scid_privacy_cfg)).unwrap();
let mut open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
- assert!(!open_channel.channel_type.as_ref().unwrap().supports_scid_privacy()); // we ignore `negotiate_scid_privacy` on pub channels
- open_channel.channel_type.as_mut().unwrap().set_scid_privacy_required();
- assert_eq!(open_channel.channel_flags & 1, 1); // The `announce_channel` bit is set.
+ assert!(!open_channel.common_fields.channel_type.as_ref().unwrap().supports_scid_privacy()); // we ignore `negotiate_scid_privacy` on pub channels
+ open_channel.common_fields.channel_type.as_mut().unwrap().set_scid_privacy_required();
+ assert_eq!(open_channel.common_fields.channel_flags & 1, 1); // The `announce_channel` bit is set.
nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel);
let err = get_err_msg(&nodes[1], &nodes[0].node.get_our_node_id());
nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None, Some(scid_privacy_cfg)).unwrap();
let init_open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
- assert!(init_open_channel.channel_type.as_ref().unwrap().supports_scid_privacy());
+ assert!(init_open_channel.common_fields.channel_type.as_ref().unwrap().supports_scid_privacy());
assert!(nodes[0].node.list_channels()[0].channel_type.is_none()); // channel_type is none until counterparty accepts
// now simulate nodes[1] responding with an Error message, indicating it doesn't understand
// SCID alias.
nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage {
- channel_id: init_open_channel.temporary_channel_id,
+ channel_id: init_open_channel.common_fields.temporary_channel_id,
data: "Yo, no SCID aliases, no privacy here!".to_string()
});
assert!(nodes[0].node.list_channels()[0].channel_type.is_none()); // channel_type is none until counterparty accepts
let second_open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
- assert!(!second_open_channel.channel_type.as_ref().unwrap().supports_scid_privacy());
+ assert!(!second_open_channel.common_fields.channel_type.as_ref().unwrap().supports_scid_privacy());
nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &second_open_channel);
nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()));
nodes[1].node.create_channel(nodes[2].node.get_our_node_id(), 100_000, 10_000, 42, None, Some(no_announce_cfg)).unwrap();
let mut open_channel = get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[2].node.get_our_node_id());
- assert!(open_channel.channel_type.as_ref().unwrap().requires_scid_privacy());
+ assert!(open_channel.common_fields.channel_type.as_ref().unwrap().requires_scid_privacy());
nodes[2].node.handle_open_channel(&nodes[1].node.get_our_node_id(), &open_channel);
let accept_channel = get_event_msg!(nodes[2], MessageSendEvent::SendAcceptChannel, nodes[1].node.get_our_node_id());
};
let mut accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
- assert_eq!(accept_channel.minimum_depth, 0);
+ assert_eq!(accept_channel.common_fields.minimum_depth, 0);
nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel);
let (temporary_channel_id, tx, funding_output) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 42);
check_added_monitors!(nodes[1], 1);
assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
- let channel_id = funding_output.to_channel_id();
+ let channel_id = ChannelId::v1_from_funding_outpoint(funding_output);
nodes[1].chain_monitor.complete_sole_pending_chan_update(&channel_id);
expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None, None).unwrap();
let mut open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
- open_channel_msg.channel_type = Some(channel_type_features.clone());
+ open_channel_msg.common_fields.channel_type = Some(channel_type_features.clone());
nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg);
let mut open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel,
nodes[1].node.get_our_node_id());
- open_channel_msg.channel_type = Some(channel_type_features.clone());
+ open_channel_msg.common_fields.channel_type = Some(channel_type_features.clone());
nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg);
let mut open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel,
nodes[1].node.get_our_node_id());
- open_channel_msg.channel_type = Some(channel_type_features);
+ open_channel_msg.common_fields.channel_type = Some(channel_type_features);
nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg);
};
let mut accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
- assert_eq!(accept_channel.minimum_depth, 0);
+ assert_eq!(accept_channel.common_fields.minimum_depth, 0);
nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel);
let events = nodes[0].node.get_and_clear_pending_events();
use crate::chain::transaction::OutPoint;
use crate::events::{ClosureReason, Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider};
use crate::ln::channelmanager::{ChannelManager, ChannelManagerReadArgs, PaymentId, RecipientOnionFields};
-use crate::ln::msgs;
+use crate::ln::{msgs, ChannelId};
use crate::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, ErrorAction};
use crate::util::test_channel_signer::TestChannelSigner;
use crate::util::test_utils;
use bitcoin::hash_types::BlockHash;
use crate::prelude::*;
-use core::default::Default;
use crate::sync::Mutex;
use crate::ln::functional_test_utils::*;
nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
let chan_0_monitor_serialized =
- get_monitor!(nodes[0], OutPoint { txid: tx.txid(), index: 0 }.to_channel_id()).encode();
+ get_monitor!(nodes[0], ChannelId::v1_from_funding_outpoint(OutPoint { txid: tx.txid(), index: 0 })).encode();
reload_node!(nodes[0], nodes[0].node.encode(), &[&chan_0_monitor_serialized], persister, new_chain_monitor, nodes_0_deserialized);
nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
}
let mut nodes_0_read = &nodes_0_serialized[..];
- if let Err(msgs::DecodeError::InvalidValue) =
+ if let Err(msgs::DecodeError::DangerousValue) =
<(BlockHash, ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestKeysInterface, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestRouter, &test_utils::TestLogger>)>::read(&mut nodes_0_read, ChannelManagerReadArgs {
default_config: UserConfig::default(),
entropy_source: keys_manager,
assert!(nodes_0_read.is_empty());
for monitor in node_0_monitors.drain(..) {
- assert_eq!(nodes[0].chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor),
+ let funding_outpoint = monitor.get_funding_txo().0;
+ assert_eq!(nodes[0].chain_monitor.watch_channel(funding_outpoint, monitor),
Ok(ChannelMonitorUpdateStatus::Completed));
check_added_monitors!(nodes[0], 1);
}
assert_eq!(send_events.len(), 2);
let node_1_msgs = remove_first_msg_event_to_node(&nodes[1].node.get_our_node_id(), &mut send_events);
let node_2_msgs = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut send_events);
- do_pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 15_000_000, payment_hash, Some(payment_secret), node_1_msgs, true, false, None, false);
- do_pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], 15_000_000, payment_hash, Some(payment_secret), node_2_msgs, true, false, None, false);
+ do_pass_along_path(PassAlongPathArgs::new(&nodes[0],&[&nodes[1], &nodes[3]], 15_000_000, payment_hash, node_1_msgs)
+ .with_payment_secret(payment_secret)
+ .without_clearing_recipient_events());
+ do_pass_along_path(PassAlongPathArgs::new(&nodes[0], &[&nodes[2], &nodes[3]], 15_000_000, payment_hash, node_2_msgs)
+ .with_payment_secret(payment_secret)
+ .without_clearing_recipient_events());
// Now that we have an MPP payment pending, get the latest encoded copies of nodes[3]'s
// monitors and ChannelManager, for use later, if we don't want to persist both monitors.
let mut original_monitor = test_utils::TestVecWriter(Vec::new());
if !persist_both_monitors {
- for outpoint in nodes[3].chain_monitor.chain_monitor.list_monitors() {
- if outpoint.to_channel_id() == chan_id_not_persisted {
+ for (outpoint, channel_id) in nodes[3].chain_monitor.chain_monitor.list_monitors() {
+ if channel_id == chan_id_not_persisted {
assert!(original_monitor.0.is_empty());
nodes[3].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap().write(&mut original_monitor).unwrap();
}
// crashed in between the two persistence calls - using one old ChannelMonitor and one new one,
// with the old ChannelManager.
let mut updated_monitor = test_utils::TestVecWriter(Vec::new());
- for outpoint in nodes[3].chain_monitor.chain_monitor.list_monitors() {
- if outpoint.to_channel_id() == chan_id_persisted {
+ for (outpoint, channel_id) in nodes[3].chain_monitor.chain_monitor.list_monitors() {
+ if channel_id == chan_id_persisted {
assert!(updated_monitor.0.is_empty());
nodes[3].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap().write(&mut updated_monitor).unwrap();
}
}
// If `persist_both_monitors` is set, get the second monitor here as well
if persist_both_monitors {
- for outpoint in nodes[3].chain_monitor.chain_monitor.list_monitors() {
- if outpoint.to_channel_id() == chan_id_not_persisted {
+ for (outpoint, channel_id) in nodes[3].chain_monitor.chain_monitor.list_monitors() {
+ if channel_id == chan_id_not_persisted {
assert!(original_monitor.0.is_empty());
nodes[3].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap().write(&mut original_monitor).unwrap();
}
assert_eq!(nodes[0].tx_broadcaster.txn_broadcast().len(), 0);
// Reload the node while a subset of the channels in the funding batch have persisted monitors.
- let channel_id_1 = OutPoint { txid: tx.txid(), index: 0 }.to_channel_id();
+ let channel_id_1 = ChannelId::v1_from_funding_outpoint(OutPoint { txid: tx.txid(), index: 0 });
let node_encoded = nodes[0].node.encode();
let channel_monitor_1_serialized = get_monitor!(nodes[0], channel_id_1).encode();
reload_node!(nodes[0], node_encoded, &[&channel_monitor_1_serialized], new_persister, new_chain_monitor, new_channel_manager);
use crate::chain::Confirm;
use crate::events::{Event, MessageSendEventsProvider, ClosureReason, HTLCDestination, MessageSendEvent};
use crate::ln::msgs::{ChannelMessageHandler, Init};
+use crate::sign::OutputSpender;
use crate::util::test_utils;
use crate::util::ser::Writeable;
use crate::util::string::UntrustedString;
use crate::prelude::*;
-use crate::ln::functional_test_utils::*;
+use crate::ln::{functional_test_utils::*, ChannelId};
fn do_test_onchain_htlc_reorg(local_commitment: bool, claim: bool) {
// Our on-chain HTLC-claim learning has a few properties worth testing:
// Connect blocks on node B
connect_blocks(&nodes[1], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1);
check_closed_broadcast!(nodes[1], true);
- check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 1000000);
+ check_closed_event!(nodes[1], 1, ClosureReason::HTLCsTimedOut, [nodes[0].node.get_our_node_id()], 1000000);
check_added_monitors!(nodes[1], 1);
// Verify node B broadcast 2 HTLC-timeout txn
let partial_claim_tx = {
let (_, _, chan_id, funding_tx) =
create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 100_000_000);
let funding_outpoint = OutPoint { txid: funding_tx.txid(), index: 0 };
- assert_eq!(funding_outpoint.to_channel_id(), chan_id);
+ assert_eq!(ChannelId::v1_from_funding_outpoint(funding_outpoint), chan_id);
let remote_txn_a = get_local_commitment_txn!(nodes[0], chan_id);
let remote_txn_b = get_local_commitment_txn!(nodes[1], chan_id);
connect_blocks(&nodes[0], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1);
check_closed_broadcast(&nodes[0], 1, true);
check_added_monitors(&nodes[0], 1);
- check_closed_event(&nodes[0], 1, ClosureReason::HolderForceClosed, false, &[nodes[1].node.get_our_node_id()], 100_000);
+ check_closed_event(&nodes[0], 1, ClosureReason::HTLCsTimedOut, false, &[nodes[1].node.get_our_node_id()], 100_000);
{
let mut txn = nodes[0].tx_broadcaster.txn_broadcast();
use crate::ln::msgs::DecodeError;
use crate::util::ser::{Readable, Writeable, Writer};
-use core::convert::TryFrom;
use crate::io;
+#[allow(unused_imports)]
+use crate::prelude::*;
+
/// A script pubkey for shutting down a channel as defined by [BOLT #2].
///
/// [BOLT #2]: https://github.com/lightning/bolts/blob/master/02-peer-protocol.md
#[cfg(test)]
mod shutdown_script_tests {
use super::ShutdownScript;
+
+ use bitcoin::address::{WitnessProgram, WitnessVersion};
use bitcoin::blockdata::opcodes;
use bitcoin::blockdata::script::{Builder, ScriptBuf};
use bitcoin::secp256k1::Secp256k1;
use bitcoin::secp256k1::{PublicKey, SecretKey};
+
use crate::ln::features::InitFeatures;
- use core::convert::TryFrom;
- use bitcoin::address::{WitnessProgram, WitnessVersion};
+ use crate::prelude::*;
fn pubkey() -> bitcoin::key::PublicKey {
let secp_ctx = Secp256k1::signing_only();
use crate::util::errors::APIError;
use crate::util::config::UserConfig;
use crate::util::string::UntrustedString;
+use crate::prelude::*;
use bitcoin::{Transaction, TxOut};
use bitcoin::blockdata::locktime::absolute::LockTime;
use bitcoin::network::constants::Network;
use bitcoin::address::{WitnessProgram, WitnessVersion};
-use regex;
-
-use core::default::Default;
-use std::convert::TryFrom;
-
use crate::ln::functional_test_utils::*;
#[test]
mine_transaction(&nodes[0], &tx);
mine_transaction(&nodes[1], &tx);
- nodes[0].node.close_channel(&OutPoint { txid: tx.txid(), index: 0 }.to_channel_id(), &nodes[1].node.get_our_node_id()).unwrap();
+ nodes[0].node.close_channel(&ChannelId::v1_from_funding_outpoint(OutPoint { txid: tx.txid(), index: 0 }), &nodes[1].node.get_our_node_id()).unwrap();
let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_shutdown);
let node_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
assert!(nodes[0].node.list_channels().is_empty());
assert!(nodes[1].node.list_channels().is_empty());
- check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 8000000);
- check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 8000000);
+ check_closed_event!(nodes[0], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 8000000);
+ check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 8000000);
}
#[test]
assert!(nodes[0].node.list_channels().is_empty());
assert!(nodes[1].node.list_channels().is_empty());
- check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
- check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[0], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
}
#[test]
nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_2nd_closing_signed.unwrap());
let (_, node_1_none) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());
assert!(node_1_none.is_none());
- check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
- check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[0], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
// Shutdown basically removes the channelDetails, testing of shutdowncomplete state unnecessary
assert!(nodes[0].node.list_channels().is_empty());
nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_2nd_closing_signed.unwrap());
let (_, node_1_none) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());
assert!(node_1_none.is_none());
- check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
- check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[0], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
// Shutdown basically removes the channelDetails, testing of shutdowncomplete state unnecessary
assert!(nodes[0].node.list_channels().is_empty());
.into_script();
nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &msgs::Shutdown {
- channel_id: open_chan.temporary_channel_id, scriptpubkey: script,
+ channel_id: open_chan.common_fields.temporary_channel_id, scriptpubkey: script,
});
check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyCoopClosedUnfundedChannel, [nodes[1].node.get_our_node_id()], 1_000_000);
}
nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_2nd_closing_signed.unwrap());
let (_, node_1_none) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());
assert!(node_1_none.is_none());
- check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
- check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[0], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
assert!(nodes[0].node.list_channels().is_empty());
close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, true);
assert!(nodes[1].node.list_channels().is_empty());
assert!(nodes[2].node.list_channels().is_empty());
- check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[2].node.get_our_node_id()], 100000);
- check_closed_event!(nodes[2], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[2].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[2], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
}
#[test]
let (_, our_payment_hash, our_payment_secret) = get_payment_preimage_hash(&nodes[2], Some(amt_msat), None);
let route_params = if blinded_recipient {
crate::ln::blinded_payment_tests::get_blinded_route_parameters(
- amt_msat, our_payment_secret,
+ amt_msat, our_payment_secret, 1, 100000000,
nodes.iter().skip(1).map(|n| n.node.get_our_node_id()).collect(), &[&chan_2.0.contents],
&chanmon_cfgs[2].keys_manager)
} else {
close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, true);
assert!(nodes[1].node.list_channels().is_empty());
assert!(nodes[2].node.list_channels().is_empty());
- check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
- check_closed_event!(nodes[1], 2, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id(), nodes[2].node.get_our_node_id()], 100000);
- check_closed_event!(nodes[2], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
+ let event1 = ExpectedCloseEvent {
+ channel_capacity_sats: Some(100000),
+ channel_id: None,
+ counterparty_node_id: Some(nodes[0].node.get_our_node_id()),
+ discard_funding: false,
+ reason: Some(ClosureReason::LocallyInitiatedCooperativeClosure),
+ channel_funding_txo: None,
+ user_channel_id: None,
+ };
+ let event2 = ExpectedCloseEvent {
+ channel_capacity_sats: Some(100000),
+ channel_id: None,
+ counterparty_node_id: Some(nodes[2].node.get_our_node_id()),
+ discard_funding: false,
+ reason: Some(ClosureReason::CounterpartyInitiatedCooperativeClosure),
+ channel_funding_txo: None,
+ user_channel_id: None,
+ };
+ check_closed_events(&nodes[1], &[event1, event2]);
+ check_closed_event!(nodes[2], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
}
fn do_test_shutdown_rebroadcast(recv_count: u8) {
nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_2nd_closing_signed.unwrap());
let (_, node_1_none) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());
assert!(node_1_none.is_none());
- check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
} else {
// If one node, however, received + responded with an identical closing_signed we end
// up erroring and node[0] will try to broadcast its own latest commitment transaction.
close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, true);
assert!(nodes[1].node.list_channels().is_empty());
assert!(nodes[2].node.list_channels().is_empty());
- check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
- check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[2].node.get_our_node_id()], 100000);
- check_closed_event!(nodes[2], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[2].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[2], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
}
#[test]
// We test that in case of peer committing upfront to a script, if it changes at closing, we refuse to sign
let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 1000000, 1000000);
- nodes[0].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id(), &nodes[2].node.get_our_node_id()).unwrap();
+ nodes[0].node.close_channel(&chan.2, &nodes[2].node.get_our_node_id()).unwrap();
let node_0_orig_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[2].node.get_our_node_id());
let mut node_0_shutdown = node_0_orig_shutdown.clone();
node_0_shutdown.scriptpubkey = Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script().to_p2sh();
// We test that in case of peer committing upfront to a script, if it doesn't change at closing, we sign
let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 1000000, 1000000);
- nodes[0].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id(), &nodes[2].node.get_our_node_id()).unwrap();
+ nodes[0].node.close_channel(&chan.2, &nodes[2].node.get_our_node_id()).unwrap();
let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[2].node.get_our_node_id());
// We test that in case of peer committing upfront to a script, if it oesn't change at closing, we sign
nodes[2].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_shutdown);
// We test that if case of peer non-signaling we don't enforce committed script at channel opening
*nodes[0].override_init_features.borrow_mut() = Some(nodes[0].node.init_features().clear_upfront_shutdown_script());
let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000);
- nodes[0].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id(), &nodes[1].node.get_our_node_id()).unwrap();
+ nodes[0].node.close_channel(&chan.2, &nodes[1].node.get_our_node_id()).unwrap();
let node_1_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_1_shutdown);
check_added_monitors!(nodes[1], 1);
// channel smoothly, opt-out is from channel initiator here
*nodes[0].override_init_features.borrow_mut() = None;
let chan = create_announced_chan_between_nodes_with_value(&nodes, 1, 0, 1000000, 1000000);
- nodes[1].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id(), &nodes[0].node.get_our_node_id()).unwrap();
+ nodes[1].node.close_channel(&chan.2, &nodes[0].node.get_our_node_id()).unwrap();
check_added_monitors!(nodes[1], 1);
let node_0_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_0_shutdown);
//// We test that if user opt-out, we provide a zero-length script at channel opening and we are able to close
//// channel smoothly
let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000);
- nodes[1].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id(), &nodes[0].node.get_our_node_id()).unwrap();
+ nodes[1].node.close_channel(&chan.2, &nodes[0].node.get_our_node_id()).unwrap();
check_added_monitors!(nodes[1], 1);
let node_0_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_0_shutdown);
// Check script when handling an open_channel message
nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None, None).unwrap();
let mut open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
- open_channel.shutdown_scriptpubkey = Some(anysegwit_shutdown_script.clone());
+ open_channel.common_fields.shutdown_scriptpubkey = Some(anysegwit_shutdown_script.clone());
nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel);
let events = nodes[1].node.get_and_clear_pending_msg_events();
let open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel);
let mut accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
- accept_channel.shutdown_scriptpubkey = Some(anysegwit_shutdown_script.clone());
+ accept_channel.common_fields.shutdown_scriptpubkey = Some(anysegwit_shutdown_script.clone());
nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel);
let events = nodes[0].node.get_and_clear_pending_msg_events();
// Use a segwit v0 script with an unsupported witness program
let mut open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
- open_channel.shutdown_scriptpubkey = Some(Builder::new().push_int(0)
+ open_channel.common_fields.shutdown_scriptpubkey = Some(Builder::new().push_int(0)
.push_slice(&[0, 0])
.into_script());
nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel);
let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
- nodes[1].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id(), &nodes[0].node.get_our_node_id()).unwrap();
+ nodes[1].node.close_channel(&chan.2, &nodes[0].node.get_our_node_id()).unwrap();
check_added_monitors!(nodes[1], 1);
// Use a segwit v0 script supported even without option_shutdown_anysegwit
let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
- nodes[1].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id(), &nodes[0].node.get_our_node_id()).unwrap();
+ nodes[1].node.close_channel(&chan.2, &nodes[0].node.get_our_node_id()).unwrap();
check_added_monitors!(nodes[1], 1);
// Use a non-v0 segwit script supported by option_shutdown_anysegwit
.expect(OnGetShutdownScriptpubkey { returns: supported_shutdown_script });
let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
- match nodes[1].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id(), &nodes[0].node.get_our_node_id()) {
+ match nodes[1].node.close_channel(&chan.2, &nodes[0].node.get_our_node_id()) {
Err(APIError::IncompatibleShutdownScript { script }) => {
assert_eq!(script.into_inner(), unsupported_shutdown_script.clone().into_inner());
},
Err(e) => panic!("Unexpected error: {:?}", e),
Ok(_) => panic!("Expected error"),
}
- nodes[1].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id(), &nodes[0].node.get_our_node_id()).unwrap();
+ nodes[1].node.close_channel(&chan.2, &nodes[0].node.get_our_node_id()).unwrap();
check_added_monitors!(nodes[1], 1);
// Use a non-v0 segwit script unsupported without option_shutdown_anysegwit
let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
- nodes[1].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id(), &nodes[0].node.get_our_node_id()).unwrap();
+ nodes[1].node.close_channel(&chan.2, &nodes[0].node.get_our_node_id()).unwrap();
check_added_monitors!(nodes[1], 1);
// Use a segwit v0 script with an unsupported witness program
let shutdown_script = ShutdownScript::try_from(script.clone()).unwrap();
let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
- nodes[1].node.close_channel_with_feerate_and_script(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id(), &nodes[0].node.get_our_node_id(), None, Some(shutdown_script)).unwrap();
+ nodes[1].node.close_channel_with_feerate_and_script(&chan.2, &nodes[0].node.get_our_node_id(), None, Some(shutdown_script)).unwrap();
check_added_monitors!(nodes[1], 1);
let mut node_0_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
let shutdown_script = ShutdownScript::try_from(script).unwrap();
let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
- let result = nodes[1].node.close_channel_with_feerate_and_script(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id(), &nodes[0].node.get_our_node_id(), None, Some(shutdown_script));
+ let result = nodes[1].node.close_channel_with_feerate_and_script(&chan.2, &nodes[0].node.get_our_node_id(), None, Some(shutdown_script));
assert_eq!(result, Err(APIError::APIMisuseError { err: "Cannot override shutdown script for a channel with one already set".to_string() }));
}
let node_0_2nd_closing_signed = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id());
if timeout_step == TimeoutStep::NoTimeout {
nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_2nd_closing_signed.1.unwrap());
- check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
}
- check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[0], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
}
if timeout_step != TimeoutStep::NoTimeout {
*feerate_lock *= 10;
}
- nodes[0].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id(), &nodes[1].node.get_our_node_id()).unwrap();
+ nodes[0].node.close_channel(&chan.2, &nodes[1].node.get_our_node_id()).unwrap();
let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_shutdown);
let node_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &node_1_closing_signed.unwrap());
let (_, node_0_none) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id());
assert!(node_0_none.is_none());
- check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
- check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[0], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
}
#[test]
let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
- let chan_id = OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id();
+ let chan_id = chan.2;
nodes[0].node.close_channel_with_feerate_and_script(&chan_id, &nodes[1].node.get_our_node_id(), Some(253 * 10), None).unwrap();
let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &node_1_closing_signed);
let (_, node_0_none) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id());
assert!(node_0_none.is_none());
- check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
- check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[0], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
}
fn do_outbound_update_no_early_closing_signed(use_htlc: bool) {
let (_, node_1_none) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());
assert!(node_1_none.is_none());
- check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
- check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[0], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
}
#[test]
FundingCreated(msgs::FundingCreated),
FundingSigned(msgs::FundingSigned),
Stfu(msgs::Stfu),
+ #[cfg(splicing)]
Splice(msgs::Splice),
+ #[cfg(splicing)]
SpliceAck(msgs::SpliceAck),
+ #[cfg(splicing)]
SpliceLocked(msgs::SpliceLocked),
TxAddInput(msgs::TxAddInput),
TxAddOutput(msgs::TxAddOutput),
&Message::FundingCreated(ref msg) => msg.write(writer),
&Message::FundingSigned(ref msg) => msg.write(writer),
&Message::Stfu(ref msg) => msg.write(writer),
+ #[cfg(splicing)]
&Message::Splice(ref msg) => msg.write(writer),
+ #[cfg(splicing)]
&Message::SpliceAck(ref msg) => msg.write(writer),
+ #[cfg(splicing)]
&Message::SpliceLocked(ref msg) => msg.write(writer),
&Message::TxAddInput(ref msg) => msg.write(writer),
&Message::TxAddOutput(ref msg) => msg.write(writer),
&Message::FundingCreated(ref msg) => msg.type_id(),
&Message::FundingSigned(ref msg) => msg.type_id(),
&Message::Stfu(ref msg) => msg.type_id(),
+ #[cfg(splicing)]
&Message::Splice(ref msg) => msg.type_id(),
+ #[cfg(splicing)]
&Message::SpliceAck(ref msg) => msg.type_id(),
+ #[cfg(splicing)]
&Message::SpliceLocked(ref msg) => msg.type_id(),
&Message::TxAddInput(ref msg) => msg.type_id(),
&Message::TxAddOutput(ref msg) => msg.type_id(),
msgs::FundingSigned::TYPE => {
Ok(Message::FundingSigned(Readable::read(buffer)?))
},
+ #[cfg(splicing)]
msgs::Splice::TYPE => {
Ok(Message::Splice(Readable::read(buffer)?))
},
msgs::Stfu::TYPE => {
Ok(Message::Stfu(Readable::read(buffer)?))
},
+ #[cfg(splicing)]
msgs::SpliceAck::TYPE => {
Ok(Message::SpliceAck(Readable::read(buffer)?))
},
+ #[cfg(splicing)]
msgs::SpliceLocked::TYPE => {
Ok(Message::SpliceLocked(Readable::read(buffer)?))
},
mod tests {
use super::*;
use crate::prelude::*;
- use core::convert::TryInto;
use crate::ln::peer_handler::IgnoringMessageHandler;
// Big-endian wire encoding of Pong message (type = 19, byteslen = 2).
-// This file is Copyright its original authors, visible in version control
+ // This file is Copyright its original authors, visible in version control
// history.
//
// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
//!
//! use bitcoin::hashes::Hash;
//! use bitcoin::secp256k1::{KeyPair, PublicKey, Secp256k1, SecretKey};
-//! use core::convert::{Infallible, TryFrom};
+//! use core::convert::TryFrom;
+//! use lightning::offers::invoice::UnsignedBolt12Invoice;
//! use lightning::offers::invoice_request::InvoiceRequest;
//! use lightning::offers::refund::Refund;
//! use lightning::util::ser::Writeable;
//!
//! # use lightning::ln::PaymentHash;
-//! # use lightning::offers::invoice::BlindedPayInfo;
+//! # use lightning::offers::invoice::{BlindedPayInfo, ExplicitSigningPubkey, InvoiceBuilder};
//! # use lightning::blinded_path::BlindedPath;
//! #
//! # fn create_payment_paths() -> Vec<(BlindedPayInfo, BlindedPath)> { unimplemented!() }
//! let mut buffer = Vec::new();
//!
//! // Invoice for the "offer to be paid" flow.
+//! # <InvoiceBuilder<ExplicitSigningPubkey>>::from(
//! InvoiceRequest::try_from(bytes)?
#![cfg_attr(feature = "std", doc = "
.respond_with(payment_paths, payment_hash)?
#![cfg_attr(not(feature = "std"), doc = "
.respond_with_no_std(payment_paths, payment_hash, core::time::Duration::from_secs(0))?
")]
+//! # )
//! .relative_expiry(3600)
//! .allow_mpp()
//! .fallback_v0_p2wpkh(&wpubkey_hash)
//! .build()?
-//! .sign::<_, Infallible>(
-//! |message| Ok(secp_ctx.sign_schnorr_no_aux_rand(message.as_ref().as_digest(), &keys))
+//! .sign(|message: &UnsignedBolt12Invoice|
+//! Ok(secp_ctx.sign_schnorr_no_aux_rand(message.as_ref().as_digest(), &keys))
//! )
//! .expect("failed verifying signature")
//! .write(&mut buffer)
//! # let mut buffer = Vec::new();
//!
//! // Invoice for the "offer for money" flow.
+//! # <InvoiceBuilder<ExplicitSigningPubkey>>::from(
//! "lnr1qcp4256ypq"
//! .parse::<Refund>()?
#![cfg_attr(feature = "std", doc = "
#![cfg_attr(not(feature = "std"), doc = "
.respond_with_no_std(payment_paths, payment_hash, pubkey, core::time::Duration::from_secs(0))?
")]
+//! # )
//! .relative_expiry(3600)
//! .allow_mpp()
//! .fallback_v0_p2wpkh(&wpubkey_hash)
//! .build()?
-//! .sign::<_, Infallible>(
-//! |message| Ok(secp_ctx.sign_schnorr_no_aux_rand(message.as_ref().as_digest(), &keys))
+//! .sign(|message: &UnsignedBolt12Invoice|
+//! Ok(secp_ctx.sign_schnorr_no_aux_rand(message.as_ref().as_digest(), &keys))
//! )
//! .expect("failed verifying signature")
//! .write(&mut buffer)
use bitcoin::blockdata::constants::ChainHash;
use bitcoin::hash_types::{WPubkeyHash, WScriptHash};
-use bitcoin::hashes::Hash;
use bitcoin::network::constants::Network;
use bitcoin::secp256k1::{KeyPair, PublicKey, Secp256k1, self};
use bitcoin::secp256k1::schnorr::Signature;
use bitcoin::address::{Address, Payload, WitnessProgram, WitnessVersion};
use bitcoin::key::TweakedPublicKey;
-use core::convert::{AsRef, Infallible, TryFrom};
use core::time::Duration;
+use core::hash::{Hash, Hasher};
use crate::io;
use crate::blinded_path::BlindedPath;
use crate::ln::PaymentHash;
use crate::ln::inbound_payment::ExpandedKey;
use crate::ln::msgs::DecodeError;
use crate::offers::invoice_request::{INVOICE_REQUEST_PAYER_ID_TYPE, INVOICE_REQUEST_TYPES, IV_BYTES as INVOICE_REQUEST_IV_BYTES, InvoiceRequest, InvoiceRequestContents, InvoiceRequestTlvStream, InvoiceRequestTlvStreamRef};
-use crate::offers::merkle::{SignError, SignatureTlvStream, SignatureTlvStreamRef, TaggedHash, TlvStream, WithoutSignatures, self};
+use crate::offers::merkle::{SignError, SignFn, SignatureTlvStream, SignatureTlvStreamRef, TaggedHash, TlvStream, WithoutSignatures, self};
use crate::offers::offer::{Amount, OFFER_TYPES, OfferTlvStream, OfferTlvStreamRef, Quantity};
use crate::offers::parse::{Bolt12ParseError, Bolt12SemanticError, ParsedMessage};
use crate::offers::payer::{PAYER_METADATA_TYPE, PayerTlvStream, PayerTlvStreamRef};
use crate::util::ser::{HighZeroBytesDroppedBigSize, Iterable, SeekReadable, WithoutLength, Writeable, Writer};
use crate::util::string::PrintableString;
+#[allow(unused_imports)]
use crate::prelude::*;
#[cfg(feature = "std")]
signing_pubkey_strategy: S,
}
+/// Builds a [`Bolt12Invoice`] from either:
+/// - an [`InvoiceRequest`] for the "offer to be paid" flow or
+/// - a [`Refund`] for the "offer for money" flow.
+///
+/// See [module-level documentation] for usage.
+///
+/// [`InvoiceRequest`]: crate::offers::invoice_request::InvoiceRequest
+/// [`Refund`]: crate::offers::refund::Refund
+/// [module-level documentation]: self
+#[cfg(c_bindings)]
+pub struct InvoiceWithExplicitSigningPubkeyBuilder<'a> {
+ invreq_bytes: &'a Vec<u8>,
+ invoice: InvoiceContents,
+ signing_pubkey_strategy: ExplicitSigningPubkey,
+}
+
+/// Builds a [`Bolt12Invoice`] from either:
+/// - an [`InvoiceRequest`] for the "offer to be paid" flow or
+/// - a [`Refund`] for the "offer for money" flow.
+///
+/// See [module-level documentation] for usage.
+///
+/// [`InvoiceRequest`]: crate::offers::invoice_request::InvoiceRequest
+/// [`Refund`]: crate::offers::refund::Refund
+/// [module-level documentation]: self
+#[cfg(c_bindings)]
+pub struct InvoiceWithDerivedSigningPubkeyBuilder<'a> {
+ invreq_bytes: &'a Vec<u8>,
+ invoice: InvoiceContents,
+ signing_pubkey_strategy: DerivedSigningPubkey,
+}
+
/// Indicates how [`Bolt12Invoice::signing_pubkey`] was set.
///
/// This is not exported to bindings users as builder patterns don't map outside of move semantics.
impl SigningPubkeyStrategy for ExplicitSigningPubkey {}
impl SigningPubkeyStrategy for DerivedSigningPubkey {}
-impl<'a> InvoiceBuilder<'a, ExplicitSigningPubkey> {
+macro_rules! invoice_explicit_signing_pubkey_builder_methods { ($self: ident, $self_type: ty) => {
+ #[cfg_attr(c_bindings, allow(dead_code))]
pub(super) fn for_offer(
invoice_request: &'a InvoiceRequest, payment_paths: Vec<(BlindedPayInfo, BlindedPath)>,
created_at: Duration, payment_hash: PaymentHash
Self::new(&invoice_request.bytes, contents, ExplicitSigningPubkey {})
}
+ #[cfg_attr(c_bindings, allow(dead_code))]
pub(super) fn for_refund(
refund: &'a Refund, payment_paths: Vec<(BlindedPayInfo, BlindedPath)>, created_at: Duration,
payment_hash: PaymentHash, signing_pubkey: PublicKey
Self::new(&refund.bytes, contents, ExplicitSigningPubkey {})
}
-}
-impl<'a> InvoiceBuilder<'a, DerivedSigningPubkey> {
+ /// Builds an unsigned [`Bolt12Invoice`] after checking for valid semantics. It can be signed by
+ /// [`UnsignedBolt12Invoice::sign`].
+ pub fn build($self: $self_type) -> Result<UnsignedBolt12Invoice, Bolt12SemanticError> {
+ #[cfg(feature = "std")] {
+ if $self.invoice.is_offer_or_refund_expired() {
+ return Err(Bolt12SemanticError::AlreadyExpired);
+ }
+ }
+
+ #[cfg(not(feature = "std"))] {
+ if $self.invoice.is_offer_or_refund_expired_no_std($self.invoice.created_at()) {
+ return Err(Bolt12SemanticError::AlreadyExpired);
+ }
+ }
+
+ let Self { invreq_bytes, invoice, .. } = $self;
+ #[cfg(not(c_bindings))] {
+ Ok(UnsignedBolt12Invoice::new(invreq_bytes, invoice))
+ }
+ #[cfg(c_bindings)] {
+ Ok(UnsignedBolt12Invoice::new(invreq_bytes, invoice.clone()))
+ }
+ }
+} }
+
+macro_rules! invoice_derived_signing_pubkey_builder_methods { ($self: ident, $self_type: ty) => {
+ #[cfg_attr(c_bindings, allow(dead_code))]
pub(super) fn for_offer_using_keys(
invoice_request: &'a InvoiceRequest, payment_paths: Vec<(BlindedPayInfo, BlindedPath)>,
created_at: Duration, payment_hash: PaymentHash, keys: KeyPair
Self::new(&invoice_request.bytes, contents, DerivedSigningPubkey(keys))
}
+ #[cfg_attr(c_bindings, allow(dead_code))]
pub(super) fn for_refund_using_keys(
refund: &'a Refund, payment_paths: Vec<(BlindedPayInfo, BlindedPath)>, created_at: Duration,
payment_hash: PaymentHash, keys: KeyPair,
Self::new(&refund.bytes, contents, DerivedSigningPubkey(keys))
}
-}
-impl<'a, S: SigningPubkeyStrategy> InvoiceBuilder<'a, S> {
+ /// Builds a signed [`Bolt12Invoice`] after checking for valid semantics.
+ pub fn build_and_sign<T: secp256k1::Signing>(
+ $self: $self_type, secp_ctx: &Secp256k1<T>
+ ) -> Result<Bolt12Invoice, Bolt12SemanticError> {
+ #[cfg(feature = "std")] {
+ if $self.invoice.is_offer_or_refund_expired() {
+ return Err(Bolt12SemanticError::AlreadyExpired);
+ }
+ }
+
+ #[cfg(not(feature = "std"))] {
+ if $self.invoice.is_offer_or_refund_expired_no_std($self.invoice.created_at()) {
+ return Err(Bolt12SemanticError::AlreadyExpired);
+ }
+ }
+
+ let Self {
+ invreq_bytes, invoice, signing_pubkey_strategy: DerivedSigningPubkey(keys)
+ } = $self;
+ #[cfg(not(c_bindings))]
+ let unsigned_invoice = UnsignedBolt12Invoice::new(invreq_bytes, invoice);
+ #[cfg(c_bindings)]
+ let mut unsigned_invoice = UnsignedBolt12Invoice::new(invreq_bytes, invoice.clone());
+
+ let invoice = unsigned_invoice
+ .sign(|message: &UnsignedBolt12Invoice|
+ Ok(secp_ctx.sign_schnorr_no_aux_rand(message.as_ref().as_digest(), &keys))
+ )
+ .unwrap();
+ Ok(invoice)
+ }
+} }
+
+macro_rules! invoice_builder_methods { (
+ $self: ident, $self_type: ty, $return_type: ty, $return_value: expr, $type_param: ty $(, $self_mut: tt)?
+) => {
pub(crate) fn amount_msats(
invoice_request: &InvoiceRequest
) -> Result<u64, Bolt12SemanticError> {
}
}
+ #[cfg_attr(c_bindings, allow(dead_code))]
fn fields(
payment_paths: Vec<(BlindedPayInfo, BlindedPath)>, created_at: Duration,
payment_hash: PaymentHash, amount_msats: u64, signing_pubkey: PublicKey
}
}
+ #[cfg_attr(c_bindings, allow(dead_code))]
fn new(
- invreq_bytes: &'a Vec<u8>, contents: InvoiceContents, signing_pubkey_strategy: S
+ invreq_bytes: &'a Vec<u8>, contents: InvoiceContents, signing_pubkey_strategy: $type_param
) -> Result<Self, Bolt12SemanticError> {
if contents.fields().payment_paths.is_empty() {
return Err(Bolt12SemanticError::MissingPaths);
/// [`Bolt12Invoice::is_expired`].
///
/// Successive calls to this method will override the previous setting.
- pub fn relative_expiry(mut self, relative_expiry_secs: u32) -> Self {
+ pub fn relative_expiry($($self_mut)* $self: $self_type, relative_expiry_secs: u32) -> $return_type {
let relative_expiry = Duration::from_secs(relative_expiry_secs as u64);
- self.invoice.fields_mut().relative_expiry = Some(relative_expiry);
- self
+ $self.invoice.fields_mut().relative_expiry = Some(relative_expiry);
+ $return_value
}
/// Adds a P2WSH address to [`Bolt12Invoice::fallbacks`].
///
/// Successive calls to this method will add another address. Caller is responsible for not
/// adding duplicate addresses and only calling if capable of receiving to P2WSH addresses.
- pub fn fallback_v0_p2wsh(mut self, script_hash: &WScriptHash) -> Self {
+ pub fn fallback_v0_p2wsh($($self_mut)* $self: $self_type, script_hash: &WScriptHash) -> $return_type {
+ use bitcoin::hashes::Hash;
let address = FallbackAddress {
version: WitnessVersion::V0.to_num(),
program: Vec::from(script_hash.to_byte_array()),
};
- self.invoice.fields_mut().fallbacks.get_or_insert_with(Vec::new).push(address);
- self
+ $self.invoice.fields_mut().fallbacks.get_or_insert_with(Vec::new).push(address);
+ $return_value
}
/// Adds a P2WPKH address to [`Bolt12Invoice::fallbacks`].
///
/// Successive calls to this method will add another address. Caller is responsible for not
/// adding duplicate addresses and only calling if capable of receiving to P2WPKH addresses.
- pub fn fallback_v0_p2wpkh(mut self, pubkey_hash: &WPubkeyHash) -> Self {
+ pub fn fallback_v0_p2wpkh($($self_mut)* $self: $self_type, pubkey_hash: &WPubkeyHash) -> $return_type {
+ use bitcoin::hashes::Hash;
let address = FallbackAddress {
version: WitnessVersion::V0.to_num(),
program: Vec::from(pubkey_hash.to_byte_array()),
};
- self.invoice.fields_mut().fallbacks.get_or_insert_with(Vec::new).push(address);
- self
+ $self.invoice.fields_mut().fallbacks.get_or_insert_with(Vec::new).push(address);
+ $return_value
}
/// Adds a P2TR address to [`Bolt12Invoice::fallbacks`].
///
/// Successive calls to this method will add another address. Caller is responsible for not
/// adding duplicate addresses and only calling if capable of receiving to P2TR addresses.
- pub fn fallback_v1_p2tr_tweaked(mut self, output_key: &TweakedPublicKey) -> Self {
+ pub fn fallback_v1_p2tr_tweaked($($self_mut)* $self: $self_type, output_key: &TweakedPublicKey) -> $return_type {
let address = FallbackAddress {
version: WitnessVersion::V1.to_num(),
program: Vec::from(&output_key.serialize()[..]),
};
- self.invoice.fields_mut().fallbacks.get_or_insert_with(Vec::new).push(address);
- self
+ $self.invoice.fields_mut().fallbacks.get_or_insert_with(Vec::new).push(address);
+ $return_value
}
/// Sets [`Bolt12Invoice::invoice_features`] to indicate MPP may be used. Otherwise, MPP is
/// disallowed.
- pub fn allow_mpp(mut self) -> Self {
- self.invoice.fields_mut().features.set_basic_mpp_optional();
- self
+ pub fn allow_mpp($($self_mut)* $self: $self_type) -> $return_type {
+ $self.invoice.fields_mut().features.set_basic_mpp_optional();
+ $return_value
}
-}
+} }
impl<'a> InvoiceBuilder<'a, ExplicitSigningPubkey> {
- /// Builds an unsigned [`Bolt12Invoice`] after checking for valid semantics. It can be signed by
- /// [`UnsignedBolt12Invoice::sign`].
- pub fn build(self) -> Result<UnsignedBolt12Invoice, Bolt12SemanticError> {
- #[cfg(feature = "std")] {
- if self.invoice.is_offer_or_refund_expired() {
- return Err(Bolt12SemanticError::AlreadyExpired);
- }
- }
+ invoice_explicit_signing_pubkey_builder_methods!(self, Self);
+}
- #[cfg(not(feature = "std"))] {
- if self.invoice.is_offer_or_refund_expired_no_std(self.invoice.created_at()) {
- return Err(Bolt12SemanticError::AlreadyExpired);
- }
- }
+impl<'a> InvoiceBuilder<'a, DerivedSigningPubkey> {
+ invoice_derived_signing_pubkey_builder_methods!(self, Self);
+}
- let InvoiceBuilder { invreq_bytes, invoice, .. } = self;
- Ok(UnsignedBolt12Invoice::new(invreq_bytes, invoice))
- }
+impl<'a, S: SigningPubkeyStrategy> InvoiceBuilder<'a, S> {
+ invoice_builder_methods!(self, Self, Self, self, S, mut);
}
-impl<'a> InvoiceBuilder<'a, DerivedSigningPubkey> {
- /// Builds a signed [`Bolt12Invoice`] after checking for valid semantics.
- pub fn build_and_sign<T: secp256k1::Signing>(
- self, secp_ctx: &Secp256k1<T>
- ) -> Result<Bolt12Invoice, Bolt12SemanticError> {
- #[cfg(feature = "std")] {
- if self.invoice.is_offer_or_refund_expired() {
- return Err(Bolt12SemanticError::AlreadyExpired);
- }
- }
+#[cfg(all(c_bindings, not(test)))]
+impl<'a> InvoiceWithExplicitSigningPubkeyBuilder<'a> {
+ invoice_explicit_signing_pubkey_builder_methods!(self, &mut Self);
+ invoice_builder_methods!(self, &mut Self, (), (), ExplicitSigningPubkey);
+}
- #[cfg(not(feature = "std"))] {
- if self.invoice.is_offer_or_refund_expired_no_std(self.invoice.created_at()) {
- return Err(Bolt12SemanticError::AlreadyExpired);
- }
+#[cfg(all(c_bindings, test))]
+impl<'a> InvoiceWithExplicitSigningPubkeyBuilder<'a> {
+ invoice_explicit_signing_pubkey_builder_methods!(self, &mut Self);
+ invoice_builder_methods!(self, &mut Self, &mut Self, self, ExplicitSigningPubkey);
+}
+
+#[cfg(all(c_bindings, not(test)))]
+impl<'a> InvoiceWithDerivedSigningPubkeyBuilder<'a> {
+ invoice_derived_signing_pubkey_builder_methods!(self, &mut Self);
+ invoice_builder_methods!(self, &mut Self, (), (), DerivedSigningPubkey);
+}
+
+#[cfg(all(c_bindings, test))]
+impl<'a> InvoiceWithDerivedSigningPubkeyBuilder<'a> {
+ invoice_derived_signing_pubkey_builder_methods!(self, &mut Self);
+ invoice_builder_methods!(self, &mut Self, &mut Self, self, DerivedSigningPubkey);
+}
+
+#[cfg(c_bindings)]
+impl<'a> From<InvoiceWithExplicitSigningPubkeyBuilder<'a>>
+for InvoiceBuilder<'a, ExplicitSigningPubkey> {
+ fn from(builder: InvoiceWithExplicitSigningPubkeyBuilder<'a>) -> Self {
+ let InvoiceWithExplicitSigningPubkeyBuilder {
+ invreq_bytes, invoice, signing_pubkey_strategy,
+ } = builder;
+
+ Self {
+ invreq_bytes, invoice, signing_pubkey_strategy,
}
+ }
+}
- let InvoiceBuilder {
- invreq_bytes, invoice, signing_pubkey_strategy: DerivedSigningPubkey(keys)
- } = self;
- let unsigned_invoice = UnsignedBolt12Invoice::new(invreq_bytes, invoice);
+#[cfg(c_bindings)]
+impl<'a> From<InvoiceWithDerivedSigningPubkeyBuilder<'a>>
+for InvoiceBuilder<'a, DerivedSigningPubkey> {
+ fn from(builder: InvoiceWithDerivedSigningPubkeyBuilder<'a>) -> Self {
+ let InvoiceWithDerivedSigningPubkeyBuilder {
+ invreq_bytes, invoice, signing_pubkey_strategy,
+ } = builder;
- let invoice = unsigned_invoice
- .sign::<_, Infallible>(
- |message| Ok(secp_ctx.sign_schnorr_no_aux_rand(message.as_ref().as_digest(), &keys))
- )
- .unwrap();
- Ok(invoice)
+ Self {
+ invreq_bytes, invoice, signing_pubkey_strategy,
+ }
}
}
tagged_hash: TaggedHash,
}
+/// A function for signing an [`UnsignedBolt12Invoice`].
+pub trait SignBolt12InvoiceFn {
+ /// Signs a [`TaggedHash`] computed over the merkle root of `message`'s TLV stream.
+ fn sign_invoice(&self, message: &UnsignedBolt12Invoice) -> Result<Signature, ()>;
+}
+
+impl<F> SignBolt12InvoiceFn for F
+where
+ F: Fn(&UnsignedBolt12Invoice) -> Result<Signature, ()>,
+{
+ fn sign_invoice(&self, message: &UnsignedBolt12Invoice) -> Result<Signature, ()> {
+ self(message)
+ }
+}
+
+impl<F> SignFn<UnsignedBolt12Invoice> for F
+where
+ F: SignBolt12InvoiceFn,
+{
+ fn sign(&self, message: &UnsignedBolt12Invoice) -> Result<Signature, ()> {
+ self.sign_invoice(message)
+ }
+}
+
impl UnsignedBolt12Invoice {
fn new(invreq_bytes: &[u8], contents: InvoiceContents) -> Self {
// Use the invoice_request bytes instead of the invoice_request TLV stream as the latter may
let mut bytes = Vec::new();
unsigned_tlv_stream.write(&mut bytes).unwrap();
- let tagged_hash = TaggedHash::new(SIGNATURE_TAG, &bytes);
+ let tagged_hash = TaggedHash::from_valid_tlv_stream_bytes(SIGNATURE_TAG, &bytes);
Self { bytes, contents, tagged_hash }
}
pub fn tagged_hash(&self) -> &TaggedHash {
&self.tagged_hash
}
+}
+macro_rules! unsigned_invoice_sign_method { ($self: ident, $self_type: ty $(, $self_mut: tt)?) => {
/// Signs the [`TaggedHash`] of the invoice using the given function.
///
/// Note: The hash computation may have included unknown, odd TLV records.
- ///
- /// This is not exported to bindings users as functions aren't currently mapped.
- pub fn sign<F, E>(mut self, sign: F) -> Result<Bolt12Invoice, SignError<E>>
- where
- F: FnOnce(&Self) -> Result<Signature, E>
- {
- let pubkey = self.contents.fields().signing_pubkey;
- let signature = merkle::sign_message(sign, &self, pubkey)?;
+ pub fn sign<F: SignBolt12InvoiceFn>(
+ $($self_mut)* $self: $self_type, sign: F
+ ) -> Result<Bolt12Invoice, SignError> {
+ let pubkey = $self.contents.fields().signing_pubkey;
+ let signature = merkle::sign_message(sign, &$self, pubkey)?;
// Append the signature TLV record to the bytes.
let signature_tlv_stream = SignatureTlvStreamRef {
signature: Some(&signature),
};
- signature_tlv_stream.write(&mut self.bytes).unwrap();
+ signature_tlv_stream.write(&mut $self.bytes).unwrap();
Ok(Bolt12Invoice {
- bytes: self.bytes,
- contents: self.contents,
+ #[cfg(not(c_bindings))]
+ bytes: $self.bytes,
+ #[cfg(c_bindings)]
+ bytes: $self.bytes.clone(),
+ #[cfg(not(c_bindings))]
+ contents: $self.contents,
+ #[cfg(c_bindings)]
+ contents: $self.contents.clone(),
signature,
- tagged_hash: self.tagged_hash,
+ #[cfg(not(c_bindings))]
+ tagged_hash: $self.tagged_hash,
+ #[cfg(c_bindings)]
+ tagged_hash: $self.tagged_hash.clone(),
})
}
+} }
+
+#[cfg(not(c_bindings))]
+impl UnsignedBolt12Invoice {
+ unsigned_invoice_sign_method!(self, Self, mut);
+}
+
+#[cfg(c_bindings)]
+impl UnsignedBolt12Invoice {
+ unsigned_invoice_sign_method!(self, &mut Self);
}
impl AsRef<TaggedHash> for UnsignedBolt12Invoice {
/// [`Refund`]: crate::offers::refund::Refund
/// [`InvoiceRequest`]: crate::offers::invoice_request::InvoiceRequest
#[derive(Clone, Debug)]
-#[cfg_attr(test, derive(PartialEq))]
pub struct Bolt12Invoice {
bytes: Vec<u8>,
contents: InvoiceContents,
}
}
+// Equality and hashing for `Bolt12Invoice` are defined over its serialized TLV
+// byte stream. `bytes` is the form the invoice was built/parsed from (the
+// signature TLV record is appended to it on signing), so comparing it alone
+// compares the whole invoice.
+impl PartialEq for Bolt12Invoice {
+ fn eq(&self, other: &Self) -> bool {
+ self.bytes.eq(&other.bytes)
+ }
+}
+
+impl Eq for Bolt12Invoice {}
+
+impl Hash for Bolt12Invoice {
+ fn hash<H: Hasher>(&self, state: &mut H) {
+ // Hash exactly the bytes compared by `eq` so the Eq/Hash contract holds.
+ self.bytes.hash(state);
+ }
+}
+
impl InvoiceContents {
/// Whether the original offer or refund has expired.
#[cfg(feature = "std")]
(payer_tlv_stream, offer_tlv_stream, invoice_request_tlv_stream, invoice_tlv_stream)
)?;
- let tagged_hash = TaggedHash::new(SIGNATURE_TAG, &bytes);
+ let tagged_hash = TaggedHash::from_valid_tlv_stream_bytes(SIGNATURE_TAG, &bytes);
Ok(UnsignedBolt12Invoice { bytes, contents, tagged_hash })
}
None => return Err(Bolt12ParseError::InvalidSemantics(Bolt12SemanticError::MissingSignature)),
Some(signature) => signature,
};
- let tagged_hash = TaggedHash::new(SIGNATURE_TAG, &bytes);
+ let tagged_hash = TaggedHash::from_valid_tlv_stream_bytes(SIGNATURE_TAG, &bytes);
let pubkey = contents.fields().signing_pubkey;
merkle::verify_signature(&signature, &tagged_hash, pubkey)?;
use bitcoin::secp256k1::{Message, Secp256k1, XOnlyPublicKey, self};
use bitcoin::address::{Address, Payload, WitnessProgram, WitnessVersion};
use bitcoin::key::TweakedPublicKey;
- use core::convert::TryFrom;
+
use core::time::Duration;
- use crate::blinded_path::{BlindedHop, BlindedPath};
+
+ use crate::blinded_path::{BlindedHop, BlindedPath, IntroductionNode};
use crate::sign::KeyMaterial;
use crate::ln::features::{Bolt12InvoiceFeatures, InvoiceRequestFeatures, OfferFeatures};
use crate::ln::inbound_payment::ExpandedKey;
use crate::ln::msgs::DecodeError;
use crate::offers::invoice_request::InvoiceRequestTlvStreamRef;
use crate::offers::merkle::{SignError, SignatureTlvStreamRef, TaggedHash, self};
- use crate::offers::offer::{Amount, OfferBuilder, OfferTlvStreamRef, Quantity};
+ use crate::offers::offer::{Amount, OfferTlvStreamRef, Quantity};
+ use crate::prelude::*;
+ #[cfg(not(c_bindings))]
+ use {
+ crate::offers::offer::OfferBuilder,
+ crate::offers::refund::RefundBuilder,
+ };
+ #[cfg(c_bindings)]
+ use {
+ crate::offers::offer::OfferWithExplicitMetadataBuilder as OfferBuilder,
+ crate::offers::refund::RefundMaybeWithDerivedMetadataBuilder as RefundBuilder,
+ };
use crate::offers::parse::{Bolt12ParseError, Bolt12SemanticError};
use crate::offers::payer::PayerTlvStreamRef;
- use crate::offers::refund::RefundBuilder;
use crate::offers::test_utils::*;
use crate::util::ser::{BigSize, Iterable, Writeable};
use crate::util::string::PrintableString;
},
}
+ #[cfg(c_bindings)]
+ let mut unsigned_invoice = unsigned_invoice;
let invoice = unsigned_invoice.sign(recipient_sign).unwrap();
let mut buffer = Vec::new();
assert_eq!(invoice.invoice_features(), &Bolt12InvoiceFeatures::empty());
assert_eq!(invoice.signing_pubkey(), recipient_pubkey());
- let message = TaggedHash::new(SIGNATURE_TAG, &invoice.bytes);
+ let message = TaggedHash::from_valid_tlv_stream_bytes(SIGNATURE_TAG, &invoice.bytes);
assert!(merkle::verify_signature(&invoice.signature, &message, recipient_pubkey()).is_ok());
let digest = Message::from_slice(&invoice.signable_hash()).unwrap();
assert_eq!(invoice.invoice_features(), &Bolt12InvoiceFeatures::empty());
assert_eq!(invoice.signing_pubkey(), recipient_pubkey());
- let message = TaggedHash::new(SIGNATURE_TAG, &invoice.bytes);
+ let message = TaggedHash::from_valid_tlv_stream_bytes(SIGNATURE_TAG, &invoice.bytes);
assert!(merkle::verify_signature(&invoice.signature, &message, recipient_pubkey()).is_ok());
assert_eq!(
let secp_ctx = Secp256k1::new();
let blinded_path = BlindedPath {
- introduction_node_id: pubkey(40),
+ introduction_node: IntroductionNode::NodeId(pubkey(40)),
blinding_point: pubkey(41),
blinded_hops: vec![
BlindedHop { blinded_node_id: pubkey(42), encrypted_payload: vec![0; 43] },
],
};
+ #[cfg(c_bindings)]
+ use crate::offers::offer::OfferWithDerivedMetadataBuilder as OfferBuilder;
let offer = OfferBuilder
::deriving_signing_pubkey(desc, node_id, &expanded_key, &entropy, &secp_ctx)
.amount_msats(1000)
.sign(payer_sign).unwrap()
.respond_with_no_std(payment_paths(), payment_hash(), now()).unwrap()
.build().unwrap()
- .sign(|_| Err(()))
+ .sign(fail_sign)
{
Ok(_) => panic!("expected error"),
- Err(e) => assert_eq!(e, SignError::Signing(())),
+ Err(e) => assert_eq!(e, SignError::Signing),
}
match OfferBuilder::new("foo".into(), recipient_pubkey())
.request_invoice(vec![1; 32], payer_pubkey()).unwrap()
.build().unwrap()
.sign(payer_sign).unwrap();
+ #[cfg(not(c_bindings))]
+ let invoice_builder = invoice_request
+ .respond_with_no_std(payment_paths(), payment_hash(), now()).unwrap();
+ #[cfg(c_bindings)]
let mut invoice_builder = invoice_request
- .respond_with_no_std(payment_paths(), payment_hash(), now()).unwrap()
+ .respond_with_no_std(payment_paths(), payment_hash(), now()).unwrap();
+ let invoice_builder = invoice_builder
.fallback_v0_p2wsh(&script.wscript_hash())
.fallback_v0_p2wpkh(&pubkey.wpubkey_hash().unwrap())
.fallback_v1_p2tr_tweaked(&tweaked_pubkey);
+ #[cfg(not(c_bindings))]
+ let mut invoice_builder = invoice_builder;
// Only standard addresses will be included.
let fallbacks = invoice_builder.invoice.fields_mut().fallbacks.as_mut().unwrap();
use crate::io;
use crate::ln::msgs::DecodeError;
+use crate::offers::merkle::SignError;
use crate::offers::parse::Bolt12SemanticError;
use crate::util::ser::{HighZeroBytesDroppedBigSize, Readable, WithoutLength, Writeable, Writer};
use crate::util::string::UntrustedString;
+#[allow(unused_imports)]
use crate::prelude::*;
/// An error in response to an [`InvoiceRequest`] or an [`Bolt12Invoice`].
}
}
+// Converts a signing failure into an `InvoiceError` message suitable for
+// sending back to the counterparty. No particular invoice field is at fault,
+// hence `erroneous_field: None`.
+impl From<SignError> for InvoiceError {
+ fn from(error: SignError) -> Self {
+ let message = match error {
+ SignError::Signing => "Failed signing invoice",
+ SignError::Verification(_) => "Failed invoice signature verification",
+ };
+ InvoiceError {
+ erroneous_field: None,
+ message: UntrustedString(message.to_string()),
+ }
+ }
+}
+
#[cfg(test)]
mod tests {
use super::{ErroneousField, InvoiceError};
//!
//! use bitcoin::network::constants::Network;
//! use bitcoin::secp256k1::{KeyPair, PublicKey, Secp256k1, SecretKey};
-//! use core::convert::Infallible;
//! use lightning::ln::features::OfferFeatures;
+//! use lightning::offers::invoice_request::UnsignedInvoiceRequest;
//! use lightning::offers::offer::Offer;
//! use lightning::util::ser::Writeable;
//!
//! let pubkey = PublicKey::from(keys);
//! let mut buffer = Vec::new();
//!
+//! # use lightning::offers::invoice_request::{ExplicitPayerId, InvoiceRequestBuilder};
+//! # <InvoiceRequestBuilder<ExplicitPayerId, _>>::from(
//! "lno1qcp4256ypq"
//! .parse::<Offer>()?
//! .request_invoice(vec![42; 64], pubkey)?
+//! # )
//! .chain(Network::Testnet)?
//! .amount_msats(1000)?
//! .quantity(5)?
//! .payer_note("foo".to_string())
//! .build()?
-//! .sign::<_, Infallible>(
-//! |message| Ok(secp_ctx.sign_schnorr_no_aux_rand(message.as_ref().as_digest(), &keys))
+//! .sign(|message: &UnsignedInvoiceRequest|
+//! Ok(secp_ctx.sign_schnorr_no_aux_rand(message.as_ref().as_digest(), &keys))
//! )
//! .expect("failed verifying signature")
//! .write(&mut buffer)
use bitcoin::network::constants::Network;
use bitcoin::secp256k1::{KeyPair, PublicKey, Secp256k1, self};
use bitcoin::secp256k1::schnorr::Signature;
-use core::convert::{AsRef, Infallible, TryFrom};
use core::ops::Deref;
use crate::sign::EntropySource;
use crate::io;
use crate::ln::features::InvoiceRequestFeatures;
use crate::ln::inbound_payment::{ExpandedKey, IV_LEN, Nonce};
use crate::ln::msgs::DecodeError;
-use crate::offers::invoice::{BlindedPayInfo, DerivedSigningPubkey, ExplicitSigningPubkey, InvoiceBuilder};
-use crate::offers::merkle::{SignError, SignatureTlvStream, SignatureTlvStreamRef, TaggedHash, self};
-use crate::offers::offer::{Offer, OfferContents, OfferTlvStream, OfferTlvStreamRef};
+use crate::offers::invoice::BlindedPayInfo;
+use crate::offers::merkle::{SignError, SignFn, SignatureTlvStream, SignatureTlvStreamRef, TaggedHash, self};
+use crate::offers::offer::{Offer, OfferContents, OfferId, OfferTlvStream, OfferTlvStreamRef};
use crate::offers::parse::{Bolt12ParseError, ParsedMessage, Bolt12SemanticError};
use crate::offers::payer::{PayerContents, PayerTlvStream, PayerTlvStreamRef};
use crate::offers::signer::{Metadata, MetadataMaterial};
-use crate::util::ser::{HighZeroBytesDroppedBigSize, SeekReadable, WithoutLength, Writeable, Writer};
-use crate::util::string::PrintableString;
-
+use crate::util::ser::{HighZeroBytesDroppedBigSize, Readable, SeekReadable, WithoutLength, Writeable, Writer};
+use crate::util::string::{PrintableString, UntrustedString};
+
+#[cfg(not(c_bindings))]
+use {
+ crate::offers::invoice::{DerivedSigningPubkey, ExplicitSigningPubkey, InvoiceBuilder},
+};
+#[cfg(c_bindings)]
+use {
+ crate::offers::invoice::{InvoiceWithDerivedSigningPubkeyBuilder, InvoiceWithExplicitSigningPubkeyBuilder},
+};
+
+#[allow(unused_imports)]
use crate::prelude::*;
/// Tag for the hash function used when signing an [`InvoiceRequest`]'s merkle root.
secp_ctx: Option<&'b Secp256k1<T>>,
}
+/// Builds an [`InvoiceRequest`] from an [`Offer`] for the "offer to be paid" flow.
+///
+/// See [module-level documentation] for usage.
+///
+/// This is the `c_bindings` counterpart of the generic `InvoiceRequestBuilder`
+/// with an explicitly set payer id; it carries the same fields and is
+/// convertible into that builder via `From`.
+///
+/// [module-level documentation]: self
+#[cfg(c_bindings)]
+pub struct InvoiceRequestWithExplicitPayerIdBuilder<'a, 'b> {
+ offer: &'a Offer,
+ invoice_request: InvoiceRequestContentsWithoutPayerId,
+ payer_id: Option<PublicKey>,
+ payer_id_strategy: core::marker::PhantomData<ExplicitPayerId>,
+ secp_ctx: Option<&'b Secp256k1<secp256k1::All>>,
+}
+
+/// Builds an [`InvoiceRequest`] from an [`Offer`] for the "offer to be paid" flow.
+///
+/// See [module-level documentation] for usage.
+///
+/// This is the `c_bindings` counterpart of the generic `InvoiceRequestBuilder`
+/// whose payer id is derived (see `deriving_payer_id`); it carries the same
+/// fields and is convertible into that builder via `From`.
+///
+/// [module-level documentation]: self
+#[cfg(c_bindings)]
+pub struct InvoiceRequestWithDerivedPayerIdBuilder<'a, 'b> {
+ offer: &'a Offer,
+ invoice_request: InvoiceRequestContentsWithoutPayerId,
+ payer_id: Option<PublicKey>,
+ payer_id_strategy: core::marker::PhantomData<DerivedPayerId>,
+ secp_ctx: Option<&'b Secp256k1<secp256k1::All>>,
+}
+
/// Indicates how [`InvoiceRequest::payer_id`] will be set.
///
/// This is not exported to bindings users as builder patterns don't map outside of move semantics.
impl PayerIdStrategy for ExplicitPayerId {}
impl PayerIdStrategy for DerivedPayerId {}
-impl<'a, 'b, T: secp256k1::Signing> InvoiceRequestBuilder<'a, 'b, ExplicitPayerId, T> {
+macro_rules! invoice_request_explicit_payer_id_builder_methods { ($self: ident, $self_type: ty) => {
+ #[cfg_attr(c_bindings, allow(dead_code))]
pub(super) fn new(offer: &'a Offer, metadata: Vec<u8>, payer_id: PublicKey) -> Self {
Self {
offer,
}
}
+ #[cfg_attr(c_bindings, allow(dead_code))]
pub(super) fn deriving_metadata<ES: Deref>(
offer: &'a Offer, payer_id: PublicKey, expanded_key: &ExpandedKey, entropy_source: ES,
payment_id: PaymentId,
secp_ctx: None,
}
}
-}
-impl<'a, 'b, T: secp256k1::Signing> InvoiceRequestBuilder<'a, 'b, DerivedPayerId, T> {
+ /// Builds an unsigned [`InvoiceRequest`] after checking for valid semantics. It can be signed
+ /// by [`UnsignedInvoiceRequest::sign`].
+ pub fn build($self: $self_type) -> Result<UnsignedInvoiceRequest, Bolt12SemanticError> {
+ let (unsigned_invoice_request, keys, _) = $self.build_with_checks()?;
+ debug_assert!(keys.is_none());
+ Ok(unsigned_invoice_request)
+ }
+} }
+
+macro_rules! invoice_request_derived_payer_id_builder_methods { (
+ $self: ident, $self_type: ty, $secp_context: ty
+) => {
+ #[cfg_attr(c_bindings, allow(dead_code))]
pub(super) fn deriving_payer_id<ES: Deref>(
offer: &'a Offer, expanded_key: &ExpandedKey, entropy_source: ES,
- secp_ctx: &'b Secp256k1<T>, payment_id: PaymentId
+ secp_ctx: &'b Secp256k1<$secp_context>, payment_id: PaymentId
) -> Self where ES::Target: EntropySource {
let nonce = Nonce::from_entropy_source(entropy_source);
let payment_id = Some(payment_id);
secp_ctx: Some(secp_ctx),
}
}
-}
-impl<'a, 'b, P: PayerIdStrategy, T: secp256k1::Signing> InvoiceRequestBuilder<'a, 'b, P, T> {
+ /// Builds a signed [`InvoiceRequest`] after checking for valid semantics.
+ pub fn build_and_sign($self: $self_type) -> Result<InvoiceRequest, Bolt12SemanticError> {
+ let (unsigned_invoice_request, keys, secp_ctx) = $self.build_with_checks()?;
+ #[cfg(c_bindings)]
+ let mut unsigned_invoice_request = unsigned_invoice_request;
+ debug_assert!(keys.is_some());
+
+ let secp_ctx = secp_ctx.unwrap();
+ let keys = keys.unwrap();
+ let invoice_request = unsigned_invoice_request
+ .sign(|message: &UnsignedInvoiceRequest|
+ Ok(secp_ctx.sign_schnorr_no_aux_rand(message.as_ref().as_digest(), &keys))
+ )
+ .unwrap();
+ Ok(invoice_request)
+ }
+} }
+
+macro_rules! invoice_request_builder_methods { (
+ $self: ident, $self_type: ty, $return_type: ty, $return_value: expr, $secp_context: ty $(, $self_mut: tt)?
+) => {
+ #[cfg_attr(c_bindings, allow(dead_code))]
fn create_contents(offer: &Offer, metadata: Metadata) -> InvoiceRequestContentsWithoutPayerId {
let offer = offer.contents.clone();
InvoiceRequestContentsWithoutPayerId {
/// by the offer.
///
/// Successive calls to this method will override the previous setting.
- pub fn chain(self, network: Network) -> Result<Self, Bolt12SemanticError> {
- self.chain_hash(ChainHash::using_genesis_block(network))
+ pub fn chain($self: $self_type, network: Network) -> Result<$return_type, Bolt12SemanticError> {
+ $self.chain_hash(ChainHash::using_genesis_block(network))
}
/// Sets the [`InvoiceRequest::chain`] for paying an invoice. If not called, the chain hash of
/// offer.
///
/// Successive calls to this method will override the previous setting.
- pub(crate) fn chain_hash(mut self, chain: ChainHash) -> Result<Self, Bolt12SemanticError> {
- if !self.offer.supports_chain(chain) {
+ pub(crate) fn chain_hash($($self_mut)* $self: $self_type, chain: ChainHash) -> Result<$return_type, Bolt12SemanticError> {
+ if !$self.offer.supports_chain(chain) {
return Err(Bolt12SemanticError::UnsupportedChain);
}
- self.invoice_request.chain = Some(chain);
- Ok(self)
+ $self.invoice_request.chain = Some(chain);
+ Ok($return_value)
}
/// Sets the [`InvoiceRequest::amount_msats`] for paying an invoice. Errors if `amount_msats` is
/// Successive calls to this method will override the previous setting.
///
/// [`quantity`]: Self::quantity
- pub fn amount_msats(mut self, amount_msats: u64) -> Result<Self, Bolt12SemanticError> {
- self.invoice_request.offer.check_amount_msats_for_quantity(
- Some(amount_msats), self.invoice_request.quantity
+ pub fn amount_msats($($self_mut)* $self: $self_type, amount_msats: u64) -> Result<$return_type, Bolt12SemanticError> {
+ $self.invoice_request.offer.check_amount_msats_for_quantity(
+ Some(amount_msats), $self.invoice_request.quantity
)?;
- self.invoice_request.amount_msats = Some(amount_msats);
- Ok(self)
+ $self.invoice_request.amount_msats = Some(amount_msats);
+ Ok($return_value)
}
/// Sets [`InvoiceRequest::quantity`] of items. If not set, `1` is assumed. Errors if `quantity`
/// does not conform to [`Offer::is_valid_quantity`].
///
/// Successive calls to this method will override the previous setting.
- pub fn quantity(mut self, quantity: u64) -> Result<Self, Bolt12SemanticError> {
- self.invoice_request.offer.check_quantity(Some(quantity))?;
- self.invoice_request.quantity = Some(quantity);
- Ok(self)
+ pub fn quantity($($self_mut)* $self: $self_type, quantity: u64) -> Result<$return_type, Bolt12SemanticError> {
+ $self.invoice_request.offer.check_quantity(Some(quantity))?;
+ $self.invoice_request.quantity = Some(quantity);
+ Ok($return_value)
}
/// Sets the [`InvoiceRequest::payer_note`].
///
/// Successive calls to this method will override the previous setting.
- pub fn payer_note(mut self, payer_note: String) -> Self {
- self.invoice_request.payer_note = Some(payer_note);
- self
+ pub fn payer_note($($self_mut)* $self: $self_type, payer_note: String) -> $return_type {
+ $self.invoice_request.payer_note = Some(payer_note);
+ $return_value
}
- fn build_with_checks(mut self) -> Result<
- (UnsignedInvoiceRequest, Option<KeyPair>, Option<&'b Secp256k1<T>>),
+ fn build_with_checks($($self_mut)* $self: $self_type) -> Result<
+ (UnsignedInvoiceRequest, Option<KeyPair>, Option<&'b Secp256k1<$secp_context>>),
Bolt12SemanticError
> {
#[cfg(feature = "std")] {
- if self.offer.is_expired() {
+ if $self.offer.is_expired() {
return Err(Bolt12SemanticError::AlreadyExpired);
}
}
- let chain = self.invoice_request.chain();
- if !self.offer.supports_chain(chain) {
+ let chain = $self.invoice_request.chain();
+ if !$self.offer.supports_chain(chain) {
return Err(Bolt12SemanticError::UnsupportedChain);
}
- if chain == self.offer.implied_chain() {
- self.invoice_request.chain = None;
+ if chain == $self.offer.implied_chain() {
+ $self.invoice_request.chain = None;
}
- if self.offer.amount().is_none() && self.invoice_request.amount_msats.is_none() {
+ if $self.offer.amount().is_none() && $self.invoice_request.amount_msats.is_none() {
return Err(Bolt12SemanticError::MissingAmount);
}
- self.invoice_request.offer.check_quantity(self.invoice_request.quantity)?;
- self.invoice_request.offer.check_amount_msats_for_quantity(
- self.invoice_request.amount_msats, self.invoice_request.quantity
+ $self.invoice_request.offer.check_quantity($self.invoice_request.quantity)?;
+ $self.invoice_request.offer.check_amount_msats_for_quantity(
+ $self.invoice_request.amount_msats, $self.invoice_request.quantity
)?;
- Ok(self.build_without_checks())
+ Ok($self.build_without_checks())
}
- fn build_without_checks(mut self) ->
- (UnsignedInvoiceRequest, Option<KeyPair>, Option<&'b Secp256k1<T>>)
+ fn build_without_checks($($self_mut)* $self: $self_type) ->
+ (UnsignedInvoiceRequest, Option<KeyPair>, Option<&'b Secp256k1<$secp_context>>)
{
// Create the metadata for stateless verification of a Bolt12Invoice.
let mut keys = None;
- let secp_ctx = self.secp_ctx.clone();
- if self.invoice_request.payer.0.has_derivation_material() {
- let mut metadata = core::mem::take(&mut self.invoice_request.payer.0);
+ let secp_ctx = $self.secp_ctx.clone();
+ if $self.invoice_request.payer.0.has_derivation_material() {
+ let mut metadata = core::mem::take(&mut $self.invoice_request.payer.0);
- let mut tlv_stream = self.invoice_request.as_tlv_stream();
+ let mut tlv_stream = $self.invoice_request.as_tlv_stream();
debug_assert!(tlv_stream.2.payer_id.is_none());
tlv_stream.0.metadata = None;
if !metadata.derives_payer_keys() {
- tlv_stream.2.payer_id = self.payer_id.as_ref();
+ tlv_stream.2.payer_id = $self.payer_id.as_ref();
}
- let (derived_metadata, derived_keys) = metadata.derive_from(tlv_stream, self.secp_ctx);
+ let (derived_metadata, derived_keys) = metadata.derive_from(tlv_stream, $self.secp_ctx);
metadata = derived_metadata;
keys = derived_keys;
if let Some(keys) = keys {
- debug_assert!(self.payer_id.is_none());
- self.payer_id = Some(keys.public_key());
+ debug_assert!($self.payer_id.is_none());
+ $self.payer_id = Some(keys.public_key());
}
- self.invoice_request.payer.0 = metadata;
+ $self.invoice_request.payer.0 = metadata;
}
- debug_assert!(self.invoice_request.payer.0.as_bytes().is_some());
- debug_assert!(self.payer_id.is_some());
- let payer_id = self.payer_id.unwrap();
+ debug_assert!($self.invoice_request.payer.0.as_bytes().is_some());
+ debug_assert!($self.payer_id.is_some());
+ let payer_id = $self.payer_id.unwrap();
let invoice_request = InvoiceRequestContents {
- inner: self.invoice_request,
+ #[cfg(not(c_bindings))]
+ inner: $self.invoice_request,
+ #[cfg(c_bindings)]
+ inner: $self.invoice_request.clone(),
payer_id,
};
- let unsigned_invoice_request = UnsignedInvoiceRequest::new(self.offer, invoice_request);
+ let unsigned_invoice_request = UnsignedInvoiceRequest::new($self.offer, invoice_request);
(unsigned_invoice_request, keys, secp_ctx)
}
-}
+} }
-impl<'a, 'b, T: secp256k1::Signing> InvoiceRequestBuilder<'a, 'b, ExplicitPayerId, T> {
- /// Builds an unsigned [`InvoiceRequest`] after checking for valid semantics. It can be signed
- /// by [`UnsignedInvoiceRequest::sign`].
- pub fn build(self) -> Result<UnsignedInvoiceRequest, Bolt12SemanticError> {
- let (unsigned_invoice_request, keys, _) = self.build_with_checks()?;
- debug_assert!(keys.is_none());
- Ok(unsigned_invoice_request)
+#[cfg(test)]
+macro_rules! invoice_request_builder_test_methods { (
+ $self: ident, $self_type: ty, $return_type: ty, $return_value: expr $(, $self_mut: tt)?
+) => {
+ #[cfg_attr(c_bindings, allow(dead_code))]
+ fn chain_unchecked($($self_mut)* $self: $self_type, network: Network) -> $return_type {
+ let chain = ChainHash::using_genesis_block(network);
+ $self.invoice_request.chain = Some(chain);
+ $return_value
}
-}
-
-impl<'a, 'b, T: secp256k1::Signing> InvoiceRequestBuilder<'a, 'b, DerivedPayerId, T> {
- /// Builds a signed [`InvoiceRequest`] after checking for valid semantics.
- pub fn build_and_sign(self) -> Result<InvoiceRequest, Bolt12SemanticError> {
- let (unsigned_invoice_request, keys, secp_ctx) = self.build_with_checks()?;
- debug_assert!(keys.is_some());
- let secp_ctx = secp_ctx.unwrap();
- let keys = keys.unwrap();
- let invoice_request = unsigned_invoice_request
- .sign::<_, Infallible>(
- |message| Ok(secp_ctx.sign_schnorr_no_aux_rand(message.as_ref().as_digest(), &keys))
- )
- .unwrap();
- Ok(invoice_request)
+ #[cfg_attr(c_bindings, allow(dead_code))]
+ fn amount_msats_unchecked($($self_mut)* $self: $self_type, amount_msats: u64) -> $return_type {
+ $self.invoice_request.amount_msats = Some(amount_msats);
+ $return_value
}
-}
-#[cfg(test)]
-impl<'a, 'b, P: PayerIdStrategy, T: secp256k1::Signing> InvoiceRequestBuilder<'a, 'b, P, T> {
- fn chain_unchecked(mut self, network: Network) -> Self {
- let chain = ChainHash::using_genesis_block(network);
- self.invoice_request.chain = Some(chain);
- self
+ #[cfg_attr(c_bindings, allow(dead_code))]
+ fn features_unchecked($($self_mut)* $self: $self_type, features: InvoiceRequestFeatures) -> $return_type {
+ $self.invoice_request.features = features;
+ $return_value
}
- fn amount_msats_unchecked(mut self, amount_msats: u64) -> Self {
- self.invoice_request.amount_msats = Some(amount_msats);
- self
+ #[cfg_attr(c_bindings, allow(dead_code))]
+ fn quantity_unchecked($($self_mut)* $self: $self_type, quantity: u64) -> $return_type {
+ $self.invoice_request.quantity = Some(quantity);
+ $return_value
}
- fn features_unchecked(mut self, features: InvoiceRequestFeatures) -> Self {
- self.invoice_request.features = features;
- self
+ #[cfg_attr(c_bindings, allow(dead_code))]
+ pub(super) fn build_unchecked($self: $self_type) -> UnsignedInvoiceRequest {
+ $self.build_without_checks().0
}
+} }
+
+impl<'a, 'b, T: secp256k1::Signing> InvoiceRequestBuilder<'a, 'b, ExplicitPayerId, T> {
+ invoice_request_explicit_payer_id_builder_methods!(self, Self);
+}
+
+impl<'a, 'b, T: secp256k1::Signing> InvoiceRequestBuilder<'a, 'b, DerivedPayerId, T> {
+ invoice_request_derived_payer_id_builder_methods!(self, Self, T);
+}
+
+impl<'a, 'b, P: PayerIdStrategy, T: secp256k1::Signing> InvoiceRequestBuilder<'a, 'b, P, T> {
+ invoice_request_builder_methods!(self, Self, Self, self, T, mut);
+
+ #[cfg(test)]
+ invoice_request_builder_test_methods!(self, Self, Self, self, mut);
+}
+
+#[cfg(all(c_bindings, not(test)))]
+impl<'a, 'b> InvoiceRequestWithExplicitPayerIdBuilder<'a, 'b> {
+ invoice_request_explicit_payer_id_builder_methods!(self, &mut Self);
+ invoice_request_builder_methods!(self, &mut Self, (), (), secp256k1::All);
+}
+
+#[cfg(all(c_bindings, test))]
+impl<'a, 'b> InvoiceRequestWithExplicitPayerIdBuilder<'a, 'b> {
+ invoice_request_explicit_payer_id_builder_methods!(self, &mut Self);
+ invoice_request_builder_methods!(self, &mut Self, &mut Self, self, secp256k1::All);
+ invoice_request_builder_test_methods!(self, &mut Self, &mut Self, self);
+}
- fn quantity_unchecked(mut self, quantity: u64) -> Self {
- self.invoice_request.quantity = Some(quantity);
- self
+#[cfg(all(c_bindings, not(test)))]
+impl<'a, 'b> InvoiceRequestWithDerivedPayerIdBuilder<'a, 'b> {
+ invoice_request_derived_payer_id_builder_methods!(self, &mut Self, secp256k1::All);
+ invoice_request_builder_methods!(self, &mut Self, (), (), secp256k1::All);
+}
+
+#[cfg(all(c_bindings, test))]
+impl<'a, 'b> InvoiceRequestWithDerivedPayerIdBuilder<'a, 'b> {
+ invoice_request_derived_payer_id_builder_methods!(self, &mut Self, secp256k1::All);
+ invoice_request_builder_methods!(self, &mut Self, &mut Self, self, secp256k1::All);
+ invoice_request_builder_test_methods!(self, &mut Self, &mut Self, self);
+}
+
+#[cfg(c_bindings)]
+impl<'a, 'b> From<InvoiceRequestWithExplicitPayerIdBuilder<'a, 'b>>
+for InvoiceRequestBuilder<'a, 'b, ExplicitPayerId, secp256k1::All> {
+ fn from(builder: InvoiceRequestWithExplicitPayerIdBuilder<'a, 'b>) -> Self {
+ let InvoiceRequestWithExplicitPayerIdBuilder {
+ offer, invoice_request, payer_id, payer_id_strategy, secp_ctx,
+ } = builder;
+
+ Self {
+ offer, invoice_request, payer_id, payer_id_strategy, secp_ctx,
+ }
}
+}
+
+#[cfg(c_bindings)]
+impl<'a, 'b> From<InvoiceRequestWithDerivedPayerIdBuilder<'a, 'b>>
+for InvoiceRequestBuilder<'a, 'b, DerivedPayerId, secp256k1::All> {
+ fn from(builder: InvoiceRequestWithDerivedPayerIdBuilder<'a, 'b>) -> Self {
+ let InvoiceRequestWithDerivedPayerIdBuilder {
+ offer, invoice_request, payer_id, payer_id_strategy, secp_ctx,
+ } = builder;
- pub(super) fn build_unchecked(self) -> UnsignedInvoiceRequest {
- self.build_without_checks().0
+ Self {
+ offer, invoice_request, payer_id, payer_id_strategy, secp_ctx,
+ }
}
}
tagged_hash: TaggedHash,
}
+/// A function for signing an [`UnsignedInvoiceRequest`].
+pub trait SignInvoiceRequestFn {
+ /// Signs a [`TaggedHash`] computed over the merkle root of `message`'s TLV stream.
+ fn sign_invoice_request(&self, message: &UnsignedInvoiceRequest) -> Result<Signature, ()>;
+}
+
+// Blanket impl so any plain closure `Fn(&UnsignedInvoiceRequest) -> Result<Signature, ()>`
+// can be passed directly wherever a `SignInvoiceRequestFn` is expected.
+impl<F> SignInvoiceRequestFn for F
+where
+ F: Fn(&UnsignedInvoiceRequest) -> Result<Signature, ()>,
+{
+ fn sign_invoice_request(&self, message: &UnsignedInvoiceRequest) -> Result<Signature, ()> {
+ self(message)
+ }
+}
+
+// Bridges `SignInvoiceRequestFn` into the generic `SignFn` signer interface
+// from `offers::merkle`, as consumed by `merkle::sign_message` in
+// `UnsignedInvoiceRequest::sign`.
+impl<F> SignFn<UnsignedInvoiceRequest> for F
+where
+ F: SignInvoiceRequestFn,
+{
+ fn sign(&self, message: &UnsignedInvoiceRequest) -> Result<Signature, ()> {
+ self.sign_invoice_request(message)
+ }
+}
+
impl UnsignedInvoiceRequest {
fn new(offer: &Offer, contents: InvoiceRequestContents) -> Self {
// Use the offer bytes instead of the offer TLV stream as the offer may have contained
let mut bytes = Vec::new();
unsigned_tlv_stream.write(&mut bytes).unwrap();
- let tagged_hash = TaggedHash::new(SIGNATURE_TAG, &bytes);
+ let tagged_hash = TaggedHash::from_valid_tlv_stream_bytes(SIGNATURE_TAG, &bytes);
Self { bytes, contents, tagged_hash }
}
pub fn tagged_hash(&self) -> &TaggedHash {
&self.tagged_hash
}
+}
+macro_rules! unsigned_invoice_request_sign_method { (
+ $self: ident, $self_type: ty $(, $self_mut: tt)?
+) => {
/// Signs the [`TaggedHash`] of the invoice request using the given function.
///
/// Note: The hash computation may have included unknown, odd TLV records.
- ///
- /// This is not exported to bindings users as functions are not yet mapped.
- pub fn sign<F, E>(mut self, sign: F) -> Result<InvoiceRequest, SignError<E>>
- where
- F: FnOnce(&Self) -> Result<Signature, E>
- {
- let pubkey = self.contents.payer_id;
- let signature = merkle::sign_message(sign, &self, pubkey)?;
+ pub fn sign<F: SignInvoiceRequestFn>(
+ $($self_mut)* $self: $self_type, sign: F
+ ) -> Result<InvoiceRequest, SignError> {
+ let pubkey = $self.contents.payer_id;
+ let signature = merkle::sign_message(sign, &$self, pubkey)?;
// Append the signature TLV record to the bytes.
let signature_tlv_stream = SignatureTlvStreamRef {
signature: Some(&signature),
};
- signature_tlv_stream.write(&mut self.bytes).unwrap();
+ signature_tlv_stream.write(&mut $self.bytes).unwrap();
Ok(InvoiceRequest {
- bytes: self.bytes,
- contents: self.contents,
+ #[cfg(not(c_bindings))]
+ bytes: $self.bytes,
+ #[cfg(c_bindings)]
+ bytes: $self.bytes.clone(),
+ #[cfg(not(c_bindings))]
+ contents: $self.contents,
+ #[cfg(c_bindings)]
+ contents: $self.contents.clone(),
signature,
})
}
+} }
+
+#[cfg(not(c_bindings))]
+impl UnsignedInvoiceRequest {
+ unsigned_invoice_request_sign_method!(self, Self, mut);
+}
+
+#[cfg(c_bindings)]
+impl UnsignedInvoiceRequest {
+ unsigned_invoice_request_sign_method!(self, &mut Self);
}
impl AsRef<TaggedHash> for UnsignedInvoiceRequest {
/// ways to respond depending on whether the signing keys were derived.
#[derive(Clone, Debug)]
pub struct VerifiedInvoiceRequest {
+ /// The identifier of the [`Offer`] for which the [`InvoiceRequest`] was made.
+ pub offer_id: OfferId,
+
/// The verified request.
inner: InvoiceRequest,
invoice_request_accessors!(self, self.contents);
}
-impl InvoiceRequest {
- offer_accessors!(self, self.contents.inner.offer);
- invoice_request_accessors!(self, self.contents);
-
- /// Signature of the invoice request using [`payer_id`].
- ///
- /// [`payer_id`]: Self::payer_id
- pub fn signature(&self) -> Signature {
- self.signature
- }
-
+macro_rules! invoice_request_respond_with_explicit_signing_pubkey_methods { (
+ $self: ident, $contents: expr, $builder: ty
+) => {
/// Creates an [`InvoiceBuilder`] for the request with the given required fields and using the
/// [`Duration`] since [`std::time::SystemTime::UNIX_EPOCH`] as the creation time.
///
/// See [`InvoiceRequest::respond_with_no_std`] for further details where the aforementioned
/// creation time is used for the `created_at` parameter.
///
- /// This is not exported to bindings users as builder patterns don't map outside of move semantics.
- ///
/// [`Duration`]: core::time::Duration
#[cfg(feature = "std")]
pub fn respond_with(
- &self, payment_paths: Vec<(BlindedPayInfo, BlindedPath)>, payment_hash: PaymentHash
- ) -> Result<InvoiceBuilder<ExplicitSigningPubkey>, Bolt12SemanticError> {
+ &$self, payment_paths: Vec<(BlindedPayInfo, BlindedPath)>, payment_hash: PaymentHash
+ ) -> Result<$builder, Bolt12SemanticError> {
let created_at = std::time::SystemTime::now()
.duration_since(std::time::SystemTime::UNIX_EPOCH)
.expect("SystemTime::now() should come after SystemTime::UNIX_EPOCH");
- self.respond_with_no_std(payment_paths, payment_hash, created_at)
+ $contents.respond_with_no_std(payment_paths, payment_hash, created_at)
}
/// Creates an [`InvoiceBuilder`] for the request with the given required fields.
/// If the originating [`Offer`] was created using [`OfferBuilder::deriving_signing_pubkey`],
/// then use [`InvoiceRequest::verify`] and [`VerifiedInvoiceRequest`] methods instead.
///
- /// This is not exported to bindings users as builder patterns don't map outside of move semantics.
- ///
/// [`Bolt12Invoice::created_at`]: crate::offers::invoice::Bolt12Invoice::created_at
/// [`OfferBuilder::deriving_signing_pubkey`]: crate::offers::offer::OfferBuilder::deriving_signing_pubkey
pub fn respond_with_no_std(
- &self, payment_paths: Vec<(BlindedPayInfo, BlindedPath)>, payment_hash: PaymentHash,
+ &$self, payment_paths: Vec<(BlindedPayInfo, BlindedPath)>, payment_hash: PaymentHash,
created_at: core::time::Duration
- ) -> Result<InvoiceBuilder<ExplicitSigningPubkey>, Bolt12SemanticError> {
- if self.invoice_request_features().requires_unknown_bits() {
+ ) -> Result<$builder, Bolt12SemanticError> {
+ if $contents.invoice_request_features().requires_unknown_bits() {
return Err(Bolt12SemanticError::UnknownRequiredFeatures);
}
- InvoiceBuilder::for_offer(self, payment_paths, created_at, payment_hash)
+ <$builder>::for_offer(&$contents, payment_paths, created_at, payment_hash)
}
+} }
+macro_rules! invoice_request_verify_method { ($self: ident, $self_type: ty) => {
/// Verifies that the request was for an offer created using the given key. Returns the verified
/// request which contains the derived keys needed to sign a [`Bolt12Invoice`] for the request
/// if they could be extracted from the metadata.
///
/// [`Bolt12Invoice`]: crate::offers::invoice::Bolt12Invoice
- pub fn verify<T: secp256k1::Signing>(
- self, key: &ExpandedKey, secp_ctx: &Secp256k1<T>
+ pub fn verify<
+ #[cfg(not(c_bindings))]
+ T: secp256k1::Signing
+ >(
+ $self: $self_type, key: &ExpandedKey,
+ #[cfg(not(c_bindings))]
+ secp_ctx: &Secp256k1<T>,
+ #[cfg(c_bindings)]
+ secp_ctx: &Secp256k1<secp256k1::All>,
) -> Result<VerifiedInvoiceRequest, ()> {
- let keys = self.contents.inner.offer.verify(&self.bytes, key, secp_ctx)?;
+ let (offer_id, keys) = $self.contents.inner.offer.verify(&$self.bytes, key, secp_ctx)?;
Ok(VerifiedInvoiceRequest {
- inner: self,
+ offer_id,
+ #[cfg(not(c_bindings))]
+ inner: $self,
+ #[cfg(c_bindings)]
+ inner: $self.clone(),
keys,
})
}
+} }
+
+#[cfg(not(c_bindings))]
+impl InvoiceRequest {
+ offer_accessors!(self, self.contents.inner.offer);
+ invoice_request_accessors!(self, self.contents);
+ invoice_request_respond_with_explicit_signing_pubkey_methods!(self, self, InvoiceBuilder<ExplicitSigningPubkey>);
+ invoice_request_verify_method!(self, Self);
+}
+
+#[cfg(c_bindings)]
+impl InvoiceRequest {
+ offer_accessors!(self, self.contents.inner.offer);
+ invoice_request_accessors!(self, self.contents);
+ invoice_request_respond_with_explicit_signing_pubkey_methods!(self, self, InvoiceWithExplicitSigningPubkeyBuilder);
+ invoice_request_verify_method!(self, &Self);
+}
+
+impl InvoiceRequest {
+ /// Signature of the invoice request using [`payer_id`].
+ ///
+ /// [`payer_id`]: Self::payer_id
+ pub fn signature(&self) -> Signature {
+ self.signature
+ }
+
pub(crate) fn as_tlv_stream(&self) -> FullInvoiceRequestTlvStreamRef {
let (payer_tlv_stream, offer_tlv_stream, invoice_request_tlv_stream) =
self.contents.as_tlv_stream();
}
}
-impl VerifiedInvoiceRequest {
- offer_accessors!(self, self.inner.contents.inner.offer);
- invoice_request_accessors!(self, self.inner.contents);
-
- /// Creates an [`InvoiceBuilder`] for the request with the given required fields and using the
- /// [`Duration`] since [`std::time::SystemTime::UNIX_EPOCH`] as the creation time.
- ///
- /// See [`InvoiceRequest::respond_with_no_std`] for further details.
- ///
- /// This is not exported to bindings users as builder patterns don't map outside of move semantics.
- ///
- /// [`Duration`]: core::time::Duration
- #[cfg(feature = "std")]
- pub fn respond_with(
- &self, payment_paths: Vec<(BlindedPayInfo, BlindedPath)>, payment_hash: PaymentHash
- ) -> Result<InvoiceBuilder<ExplicitSigningPubkey>, Bolt12SemanticError> {
- self.inner.respond_with(payment_paths, payment_hash)
- }
-
- /// Creates an [`InvoiceBuilder`] for the request with the given required fields.
- ///
- /// See [`InvoiceRequest::respond_with_no_std`] for further details.
- ///
- /// This is not exported to bindings users as builder patterns don't map outside of move semantics.
- pub fn respond_with_no_std(
- &self, payment_paths: Vec<(BlindedPayInfo, BlindedPath)>, payment_hash: PaymentHash,
- created_at: core::time::Duration
- ) -> Result<InvoiceBuilder<ExplicitSigningPubkey>, Bolt12SemanticError> {
- self.inner.respond_with_no_std(payment_paths, payment_hash, created_at)
- }
-
+macro_rules! invoice_request_respond_with_derived_signing_pubkey_methods { (
+ $self: ident, $contents: expr, $builder: ty
+) => {
/// Creates an [`InvoiceBuilder`] for the request using the given required fields and that uses
/// derived signing keys from the originating [`Offer`] to sign the [`Bolt12Invoice`]. Must use
/// the same [`ExpandedKey`] as the one used to create the offer.
///
/// See [`InvoiceRequest::respond_with`] for further details.
///
- /// This is not exported to bindings users as builder patterns don't map outside of move semantics.
- ///
/// [`Bolt12Invoice`]: crate::offers::invoice::Bolt12Invoice
#[cfg(feature = "std")]
pub fn respond_using_derived_keys(
- &self, payment_paths: Vec<(BlindedPayInfo, BlindedPath)>, payment_hash: PaymentHash
- ) -> Result<InvoiceBuilder<DerivedSigningPubkey>, Bolt12SemanticError> {
+ &$self, payment_paths: Vec<(BlindedPayInfo, BlindedPath)>, payment_hash: PaymentHash
+ ) -> Result<$builder, Bolt12SemanticError> {
let created_at = std::time::SystemTime::now()
.duration_since(std::time::SystemTime::UNIX_EPOCH)
.expect("SystemTime::now() should come after SystemTime::UNIX_EPOCH");
- self.respond_using_derived_keys_no_std(payment_paths, payment_hash, created_at)
+ $self.respond_using_derived_keys_no_std(payment_paths, payment_hash, created_at)
}
/// Creates an [`InvoiceBuilder`] for the request using the given required fields and that uses
///
/// See [`InvoiceRequest::respond_with_no_std`] for further details.
///
- /// This is not exported to bindings users as builder patterns don't map outside of move semantics.
- ///
/// [`Bolt12Invoice`]: crate::offers::invoice::Bolt12Invoice
pub fn respond_using_derived_keys_no_std(
- &self, payment_paths: Vec<(BlindedPayInfo, BlindedPath)>, payment_hash: PaymentHash,
+ &$self, payment_paths: Vec<(BlindedPayInfo, BlindedPath)>, payment_hash: PaymentHash,
created_at: core::time::Duration
- ) -> Result<InvoiceBuilder<DerivedSigningPubkey>, Bolt12SemanticError> {
- if self.inner.invoice_request_features().requires_unknown_bits() {
+ ) -> Result<$builder, Bolt12SemanticError> {
+ if $self.inner.invoice_request_features().requires_unknown_bits() {
return Err(Bolt12SemanticError::UnknownRequiredFeatures);
}
- let keys = match self.keys {
+ let keys = match $self.keys {
None => return Err(Bolt12SemanticError::InvalidMetadata),
Some(keys) => keys,
};
- InvoiceBuilder::for_offer_using_keys(
- &self.inner, payment_paths, created_at, payment_hash, keys
+ <$builder>::for_offer_using_keys(
+ &$self.inner, payment_paths, created_at, payment_hash, keys
)
}
+} }
+
+impl VerifiedInvoiceRequest {
+ offer_accessors!(self, self.inner.contents.inner.offer);
+ invoice_request_accessors!(self, self.inner.contents);
+ #[cfg(not(c_bindings))]
+ invoice_request_respond_with_explicit_signing_pubkey_methods!(self, self.inner, InvoiceBuilder<ExplicitSigningPubkey>);
+ #[cfg(c_bindings)]
+ invoice_request_respond_with_explicit_signing_pubkey_methods!(self, self.inner, InvoiceWithExplicitSigningPubkeyBuilder);
+ #[cfg(not(c_bindings))]
+ invoice_request_respond_with_derived_signing_pubkey_methods!(self, self.inner, InvoiceBuilder<DerivedSigningPubkey>);
+ #[cfg(c_bindings)]
+ invoice_request_respond_with_derived_signing_pubkey_methods!(self, self.inner, InvoiceWithDerivedSigningPubkeyBuilder);
+
+ pub(crate) fn fields(&self) -> InvoiceRequestFields {
+ let InvoiceRequestContents {
+ payer_id,
+ inner: InvoiceRequestContentsWithoutPayerId {
+ payer: _, offer: _, chain: _, amount_msats, features, quantity, payer_note
+ },
+ } = &self.inner.contents;
+
+ InvoiceRequestFields {
+ payer_id: *payer_id,
+ amount_msats: *amount_msats,
+ features: features.clone(),
+ quantity: *quantity,
+ payer_note_truncated: payer_note.clone()
+ .map(|mut s| { s.truncate(PAYER_NOTE_LIMIT); UntrustedString(s) }),
+ }
+ }
}
impl InvoiceRequestContents {
(payer_tlv_stream, offer_tlv_stream, invoice_request_tlv_stream)
)?;
- let tagged_hash = TaggedHash::new(SIGNATURE_TAG, &bytes);
+ let tagged_hash = TaggedHash::from_valid_tlv_stream_bytes(SIGNATURE_TAG, &bytes);
Ok(UnsignedInvoiceRequest { bytes, contents, tagged_hash })
}
None => return Err(Bolt12ParseError::InvalidSemantics(Bolt12SemanticError::MissingSignature)),
Some(signature) => signature,
};
- let message = TaggedHash::new(SIGNATURE_TAG, &bytes);
+ let message = TaggedHash::from_valid_tlv_stream_bytes(SIGNATURE_TAG, &bytes);
merkle::verify_signature(&signature, &message, contents.payer_id)?;
Ok(InvoiceRequest { bytes, contents, signature })
}
}
+/// Fields sent in an [`InvoiceRequest`] message to include in [`PaymentContext::Bolt12Offer`].
+///
+/// [`PaymentContext::Bolt12Offer`]: crate::blinded_path::payment::PaymentContext::Bolt12Offer
+#[derive(Clone, Debug, Eq, PartialEq)]
+pub struct InvoiceRequestFields {
+ /// A possibly transient pubkey used to sign the invoice request.
+ pub payer_id: PublicKey,
+
+ /// The amount to pay in msats (i.e., the minimum lightning-payable unit for [`chain`]), which
+ /// must be greater than or equal to [`Offer::amount`], converted if necessary.
+ ///
+ /// [`chain`]: InvoiceRequest::chain
+ pub amount_msats: Option<u64>,
+
+ /// Features pertaining to requesting an invoice.
+ pub features: InvoiceRequestFeatures,
+
+ /// The quantity of the offer's item conforming to [`Offer::is_valid_quantity`].
+ pub quantity: Option<u64>,
+
+ /// A payer-provided note which will be seen by the recipient and reflected back in the invoice
+ /// response. Truncated to [`PAYER_NOTE_LIMIT`] characters.
+ pub payer_note_truncated: Option<UntrustedString>,
+}
+
+/// The maximum number of characters included in [`InvoiceRequestFields::payer_note_truncated`].
+pub const PAYER_NOTE_LIMIT: usize = 512;
+
+impl Writeable for InvoiceRequestFields {
+ fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
+ write_tlv_fields!(writer, {
+ (0, self.payer_id, required),
+ (2, self.amount_msats.map(|v| HighZeroBytesDroppedBigSize(v)), option),
+ (4, WithoutLength(&self.features), required),
+ (6, self.quantity.map(|v| HighZeroBytesDroppedBigSize(v)), option),
+ (8, self.payer_note_truncated.as_ref().map(|s| WithoutLength(&s.0)), option),
+ });
+ Ok(())
+ }
+}
+
+impl Readable for InvoiceRequestFields {
+ fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
+ _init_and_read_len_prefixed_tlv_fields!(reader, {
+ (0, payer_id, required),
+ (2, amount_msats, (option, encoding: (u64, HighZeroBytesDroppedBigSize))),
+ (4, features, (option, encoding: (InvoiceRequestFeatures, WithoutLength))),
+ (6, quantity, (option, encoding: (u64, HighZeroBytesDroppedBigSize))),
+ (8, payer_note_truncated, (option, encoding: (String, WithoutLength))),
+ });
+ let features = features.unwrap_or(InvoiceRequestFeatures::empty());
+
+ Ok(InvoiceRequestFields {
+ payer_id: payer_id.0.unwrap(), amount_msats, features, quantity,
+ payer_note_truncated: payer_note_truncated.map(|s| UntrustedString(s)),
+ })
+ }
+}
+
#[cfg(test)]
mod tests {
- use super::{InvoiceRequest, InvoiceRequestTlvStreamRef, SIGNATURE_TAG, UnsignedInvoiceRequest};
+ use super::{InvoiceRequest, InvoiceRequestFields, InvoiceRequestTlvStreamRef, PAYER_NOTE_LIMIT, SIGNATURE_TAG, UnsignedInvoiceRequest};
use bitcoin::blockdata::constants::ChainHash;
use bitcoin::network::constants::Network;
use bitcoin::secp256k1::{KeyPair, Secp256k1, SecretKey, self};
- use core::convert::{Infallible, TryFrom};
use core::num::NonZeroU64;
#[cfg(feature = "std")]
use core::time::Duration;
use crate::ln::msgs::{DecodeError, MAX_VALUE_MSAT};
use crate::offers::invoice::{Bolt12Invoice, SIGNATURE_TAG as INVOICE_SIGNATURE_TAG};
use crate::offers::merkle::{SignError, SignatureTlvStreamRef, TaggedHash, self};
- use crate::offers::offer::{Amount, OfferBuilder, OfferTlvStreamRef, Quantity};
+ use crate::offers::offer::{Amount, OfferTlvStreamRef, Quantity};
+ #[cfg(not(c_bindings))]
+ use {
+ crate::offers::offer::OfferBuilder,
+ };
+ #[cfg(c_bindings)]
+ use {
+ crate::offers::offer::OfferWithExplicitMetadataBuilder as OfferBuilder,
+ };
use crate::offers::parse::{Bolt12ParseError, Bolt12SemanticError};
use crate::offers::payer::PayerTlvStreamRef;
use crate::offers::test_utils::*;
- use crate::util::ser::{BigSize, Writeable};
- use crate::util::string::PrintableString;
+ use crate::util::ser::{BigSize, Readable, Writeable};
+ use crate::util::string::{PrintableString, UntrustedString};
#[test]
fn builds_invoice_request_with_defaults() {
.build().unwrap()
.request_invoice(vec![1; 32], payer_pubkey()).unwrap()
.build().unwrap();
+ #[cfg(c_bindings)]
+ let mut unsigned_invoice_request = unsigned_invoice_request;
let mut buffer = Vec::new();
unsigned_invoice_request.write(&mut buffer).unwrap();
assert_eq!(invoice_request.payer_id(), payer_pubkey());
assert_eq!(invoice_request.payer_note(), None);
- let message = TaggedHash::new(SIGNATURE_TAG, &invoice_request.bytes);
+ let message = TaggedHash::from_valid_tlv_stream_bytes(SIGNATURE_TAG, &invoice_request.bytes);
assert!(merkle::verify_signature(&invoice_request.signature, &message, payer_pubkey()).is_ok());
assert_eq!(
let mut bytes = Vec::new();
tlv_stream.write(&mut bytes).unwrap();
- let message = TaggedHash::new(INVOICE_SIGNATURE_TAG, &bytes);
+ let message = TaggedHash::from_valid_tlv_stream_bytes(INVOICE_SIGNATURE_TAG, &bytes);
let signature = merkle::sign_message(recipient_sign, &message, recipient_pubkey()).unwrap();
signature_tlv_stream.signature = Some(&signature);
let mut bytes = Vec::new();
tlv_stream.write(&mut bytes).unwrap();
- let message = TaggedHash::new(INVOICE_SIGNATURE_TAG, &bytes);
+ let message = TaggedHash::from_valid_tlv_stream_bytes(INVOICE_SIGNATURE_TAG, &bytes);
let signature = merkle::sign_message(recipient_sign, &message, recipient_pubkey()).unwrap();
signature_tlv_stream.signature = Some(&signature);
let mut bytes = Vec::new();
tlv_stream.write(&mut bytes).unwrap();
- let message = TaggedHash::new(INVOICE_SIGNATURE_TAG, &bytes);
+ let message = TaggedHash::from_valid_tlv_stream_bytes(INVOICE_SIGNATURE_TAG, &bytes);
let signature = merkle::sign_message(recipient_sign, &message, recipient_pubkey()).unwrap();
signature_tlv_stream.signature = Some(&signature);
let mut bytes = Vec::new();
tlv_stream.write(&mut bytes).unwrap();
- let message = TaggedHash::new(INVOICE_SIGNATURE_TAG, &bytes);
+ let message = TaggedHash::from_valid_tlv_stream_bytes(INVOICE_SIGNATURE_TAG, &bytes);
let signature = merkle::sign_message(recipient_sign, &message, recipient_pubkey()).unwrap();
signature_tlv_stream.signature = Some(&signature);
.build().unwrap()
.request_invoice(vec![1; 32], payer_pubkey()).unwrap()
.build().unwrap()
- .sign(|_| Err(()))
+ .sign(fail_sign)
{
Ok(_) => panic!("expected error"),
- Err(e) => assert_eq!(e, SignError::Signing(())),
+ Err(e) => assert_eq!(e, SignError::Signing),
}
match OfferBuilder::new("foo".into(), recipient_pubkey())
.build().unwrap()
.request_invoice(vec![1; 32], keys.public_key()).unwrap()
.build().unwrap()
- .sign::<_, Infallible>(
- |message| Ok(secp_ctx.sign_schnorr_no_aux_rand(message.as_ref().as_digest(), &keys))
+ .sign(|message: &UnsignedInvoiceRequest|
+ Ok(secp_ctx.sign_schnorr_no_aux_rand(message.as_ref().as_digest(), &keys))
)
.unwrap();
Err(e) => assert_eq!(e, Bolt12ParseError::Decode(DecodeError::InvalidValue)),
}
}
+
+ #[test]
+ fn copies_verified_invoice_request_fields() {
+ let desc = "foo".to_string();
+ let node_id = recipient_pubkey();
+ let expanded_key = ExpandedKey::new(&KeyMaterial([42; 32]));
+ let entropy = FixedEntropy {};
+ let secp_ctx = Secp256k1::new();
+
+ #[cfg(c_bindings)]
+ use crate::offers::offer::OfferWithDerivedMetadataBuilder as OfferBuilder;
+ let offer = OfferBuilder
+ ::deriving_signing_pubkey(desc, node_id, &expanded_key, &entropy, &secp_ctx)
+ .chain(Network::Testnet)
+ .amount_msats(1000)
+ .supported_quantity(Quantity::Unbounded)
+ .build().unwrap();
+ assert_eq!(offer.signing_pubkey(), node_id);
+
+ let invoice_request = offer.request_invoice(vec![1; 32], payer_pubkey()).unwrap()
+ .chain(Network::Testnet).unwrap()
+ .amount_msats(1001).unwrap()
+ .quantity(1).unwrap()
+ .payer_note("0".repeat(PAYER_NOTE_LIMIT * 2))
+ .build().unwrap()
+ .sign(payer_sign).unwrap();
+ match invoice_request.verify(&expanded_key, &secp_ctx) {
+ Ok(invoice_request) => {
+ let fields = invoice_request.fields();
+ assert_eq!(invoice_request.offer_id, offer.id());
+ assert_eq!(
+ fields,
+ InvoiceRequestFields {
+ payer_id: payer_pubkey(),
+ amount_msats: Some(1001),
+ features: InvoiceRequestFeatures::empty(),
+ quantity: Some(1),
+ payer_note_truncated: Some(UntrustedString("0".repeat(PAYER_NOTE_LIMIT))),
+ }
+ );
+
+ let mut buffer = Vec::new();
+ fields.write(&mut buffer).unwrap();
+
+ let deserialized_fields: InvoiceRequestFields =
+ Readable::read(&mut buffer.as_slice()).unwrap();
+ assert_eq!(deserialized_fields, fields);
+ },
+ Err(_) => panic!("unexpected error"),
+ }
+ }
}
use bitcoin::hashes::{Hash, HashEngine, sha256};
use bitcoin::secp256k1::{Message, PublicKey, Secp256k1, self};
use bitcoin::secp256k1::schnorr::Signature;
-use core::convert::AsRef;
use crate::io;
use crate::util::ser::{BigSize, Readable, Writeable, Writer};
+#[allow(unused_imports)]
use crate::prelude::*;
/// Valid type range for signature TLV records.
}
impl TaggedHash {
+ /// Creates a tagged hash with the given parameters.
+ ///
+ /// Panics if `bytes` is not a well-formed TLV stream containing at least one TLV record.
+ pub(super) fn from_valid_tlv_stream_bytes(tag: &'static str, bytes: &[u8]) -> Self {
+ let tlv_stream = TlvStream::new(bytes);
+ Self::from_tlv_stream(tag, tlv_stream)
+ }
+
/// Creates a tagged hash with the given parameters.
///
/// Panics if `tlv_stream` is not a well-formed TLV stream containing at least one TLV record.
- pub(super) fn new(tag: &'static str, tlv_stream: &[u8]) -> Self {
+ pub(super) fn from_tlv_stream<'a, I: core::iter::Iterator<Item = TlvRecord<'a>>>(
+ tag: &'static str, tlv_stream: I
+ ) -> Self {
let tag_hash = sha256::Hash::hash(tag.as_bytes());
let merkle_root = root_hash(tlv_stream);
let digest = Message::from_slice(tagged_hash(tag_hash, merkle_root).as_byte_array()).unwrap();
pub fn merkle_root(&self) -> sha256::Hash {
self.merkle_root
}
+
+ pub(super) fn to_bytes(&self) -> [u8; 32] {
+ *self.digest.as_ref()
+ }
}
impl AsRef<TaggedHash> for TaggedHash {
/// Error when signing messages.
#[derive(Debug, PartialEq)]
-pub enum SignError<E> {
+pub enum SignError {
/// User-defined error when signing the message.
- Signing(E),
+ Signing,
/// Error when verifying the produced signature using the given pubkey.
Verification(secp256k1::Error),
}
+/// A function for signing a [`TaggedHash`].
+pub(super) trait SignFn<T: AsRef<TaggedHash>> {
+ /// Signs a [`TaggedHash`] computed over the merkle root of `message`'s TLV stream.
+ fn sign(&self, message: &T) -> Result<Signature, ()>;
+}
+
+impl<F> SignFn<TaggedHash> for F
+where
+ F: Fn(&TaggedHash) -> Result<Signature, ()>,
+{
+ fn sign(&self, message: &TaggedHash) -> Result<Signature, ()> {
+ self(message)
+ }
+}
+
/// Signs a [`TaggedHash`] computed over the merkle root of `message`'s TLV stream, checking if it
/// can be verified with the supplied `pubkey`.
///
///
/// [`Bolt12Invoice`]: crate::offers::invoice::Bolt12Invoice
/// [`InvoiceRequest`]: crate::offers::invoice_request::InvoiceRequest
-pub(super) fn sign_message<F, E, T>(
- sign: F, message: &T, pubkey: PublicKey,
-) -> Result<Signature, SignError<E>>
+pub(super) fn sign_message<F, T>(
+ f: F, message: &T, pubkey: PublicKey,
+) -> Result<Signature, SignError>
where
- F: FnOnce(&T) -> Result<Signature, E>,
+ F: SignFn<T>,
T: AsRef<TaggedHash>,
{
- let signature = sign(message).map_err(|e| SignError::Signing(e))?;
+ let signature = f.sign(message).map_err(|()| SignError::Signing)?;
let digest = message.as_ref().as_digest();
let pubkey = pubkey.into();
/// Computes a merkle root hash for the given data, which must be a well-formed TLV stream
/// containing at least one TLV record.
-fn root_hash(data: &[u8]) -> sha256::Hash {
+fn root_hash<'a, I: core::iter::Iterator<Item = TlvRecord<'a>>>(tlv_stream: I) -> sha256::Hash {
+ let mut tlv_stream = tlv_stream.peekable();
let nonce_tag = tagged_hash_engine(sha256::Hash::from_engine({
- let first_tlv_record = TlvStream::new(&data[..]).next().unwrap();
+ let first_tlv_record = tlv_stream.peek().unwrap();
let mut engine = sha256::Hash::engine();
engine.input("LnNonce".as_bytes());
engine.input(first_tlv_record.record_bytes);
let branch_tag = tagged_hash_engine(sha256::Hash::hash("LnBranch".as_bytes()));
let mut leaves = Vec::new();
- let tlv_stream = TlvStream::new(&data[..]);
- for record in tlv_stream.skip_signatures() {
+ for record in TlvStream::skip_signatures(tlv_stream) {
leaves.push(tagged_hash_from_engine(leaf_tag.clone(), &record.record_bytes));
leaves.push(tagged_hash_from_engine(nonce_tag.clone(), &record.type_bytes));
}
.take_while(move |record| take_range.contains(&record.r#type))
}
- fn skip_signatures(self) -> core::iter::Filter<TlvStream<'a>, fn(&TlvRecord) -> bool> {
- self.filter(|record| !SIGNATURE_TYPES.contains(&record.r#type))
+ fn skip_signatures(
+ tlv_stream: impl core::iter::Iterator<Item = TlvRecord<'a>>
+ ) -> impl core::iter::Iterator<Item = TlvRecord<'a>> {
+ tlv_stream.filter(|record| !SIGNATURE_TYPES.contains(&record.r#type))
}
}
#[inline]
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
let tlv_stream = TlvStream::new(self.0);
- for record in tlv_stream.skip_signatures() {
+ for record in TlvStream::skip_signatures(tlv_stream) {
writer.write_all(record.record_bytes)?;
}
Ok(())
use bitcoin::hashes::hex::FromHex;
use bitcoin::secp256k1::{KeyPair, Message, Secp256k1, SecretKey};
use bitcoin::secp256k1::schnorr::Signature;
- use core::convert::Infallible;
use crate::offers::offer::{Amount, OfferBuilder};
- use crate::offers::invoice_request::InvoiceRequest;
+ use crate::offers::invoice_request::{InvoiceRequest, UnsignedInvoiceRequest};
use crate::offers::parse::Bech32Encode;
use crate::offers::test_utils::{payer_pubkey, recipient_pubkey};
use crate::util::ser::Writeable;
macro_rules! tlv2 { () => { "02080000010000020003" } }
macro_rules! tlv3 { () => { "03310266e4598d1d3c415f572a8488830b60f7e744ed9235eb0b1ba93283b315c0351800000000000000010000000000000002" } }
assert_eq!(
- super::root_hash(&<Vec<u8>>::from_hex(tlv1!()).unwrap()),
+ super::root_hash(TlvStream::new(&<Vec<u8>>::from_hex(tlv1!()).unwrap())),
sha256::Hash::from_slice(&<Vec<u8>>::from_hex("b013756c8fee86503a0b4abdab4cddeb1af5d344ca6fc2fa8b6c08938caa6f93").unwrap()).unwrap(),
);
assert_eq!(
- super::root_hash(&<Vec<u8>>::from_hex(concat!(tlv1!(), tlv2!())).unwrap()),
+ super::root_hash(TlvStream::new(&<Vec<u8>>::from_hex(concat!(tlv1!(), tlv2!())).unwrap())),
sha256::Hash::from_slice(&<Vec<u8>>::from_hex("c3774abbf4815aa54ccaa026bff6581f01f3be5fe814c620a252534f434bc0d1").unwrap()).unwrap(),
);
assert_eq!(
- super::root_hash(&<Vec<u8>>::from_hex(concat!(tlv1!(), tlv2!(), tlv3!())).unwrap()),
+ super::root_hash(TlvStream::new(&<Vec<u8>>::from_hex(concat!(tlv1!(), tlv2!(), tlv3!())).unwrap())),
sha256::Hash::from_slice(&<Vec<u8>>::from_hex("ab2e79b1283b0b31e0b035258de23782df6b89a38cfa7237bde69aed1a658c5d").unwrap()).unwrap(),
);
}
.build_unchecked()
.request_invoice(vec![0; 8], payer_keys.public_key()).unwrap()
.build_unchecked()
- .sign::<_, Infallible>(
- |message| Ok(secp_ctx.sign_schnorr_no_aux_rand(message.as_ref().as_digest(), &payer_keys))
+ .sign(|message: &UnsignedInvoiceRequest|
+ Ok(secp_ctx.sign_schnorr_no_aux_rand(message.as_ref().as_digest(), &payer_keys))
)
.unwrap();
assert_eq!(
"lnr1qqyqqqqqqqqqqqqqqcp4256ypqqkgzshgysy6ct5dpjk6ct5d93kzmpq23ex2ct5d9ek293pqthvwfzadd7jejes8q9lhc4rvjxd022zv5l44g6qah82ru5rdpnpjkppqvjx204vgdzgsqpvcp4mldl3plscny0rt707gvpdh6ndydfacz43euzqhrurageg3n7kafgsek6gz3e9w52parv8gs2hlxzk95tzeswywffxlkeyhml0hh46kndmwf4m6xma3tkq2lu04qz3slje2rfthc89vss",
);
assert_eq!(
- super::root_hash(&invoice_request.bytes[..]),
+ super::root_hash(TlvStream::new(&invoice_request.bytes[..])),
sha256::Hash::from_slice(&<Vec<u8>>::from_hex("608407c18ad9a94d9ea2bcdbe170b6c20c462a7833a197621c916f78cf18e624").unwrap()).unwrap(),
);
assert_eq!(
.build_unchecked()
.request_invoice(vec![0; 8], payer_keys.public_key()).unwrap()
.build_unchecked()
- .sign::<_, Infallible>(
- |message| Ok(secp_ctx.sign_schnorr_no_aux_rand(message.as_ref().as_digest(), &payer_keys))
+ .sign(|message: &UnsignedInvoiceRequest|
+ Ok(secp_ctx.sign_schnorr_no_aux_rand(message.as_ref().as_digest(), &payer_keys))
)
.unwrap();
.build_unchecked()
.request_invoice(vec![0; 8], payer_keys.public_key()).unwrap()
.build_unchecked()
- .sign::<_, Infallible>(
- |message| Ok(secp_ctx.sign_schnorr_no_aux_rand(message.as_ref().as_digest(), &payer_keys))
+ .sign(|message: &UnsignedInvoiceRequest|
+ Ok(secp_ctx.sign_schnorr_no_aux_rand(message.as_ref().as_digest(), &payer_keys))
)
.unwrap();
use bitcoin::blockdata::constants::ChainHash;
use bitcoin::network::constants::Network;
use bitcoin::secp256k1::{KeyPair, PublicKey, Secp256k1, self};
-use core::convert::TryFrom;
+use core::hash::{Hash, Hasher};
use core::num::NonZeroU64;
use core::ops::Deref;
use core::str::FromStr;
use crate::ln::channelmanager::PaymentId;
use crate::ln::features::OfferFeatures;
use crate::ln::inbound_payment::{ExpandedKey, IV_LEN, Nonce};
-use crate::ln::msgs::MAX_VALUE_MSAT;
-use crate::offers::invoice_request::{DerivedPayerId, ExplicitPayerId, InvoiceRequestBuilder};
-use crate::offers::merkle::TlvStream;
+use crate::ln::msgs::{DecodeError, MAX_VALUE_MSAT};
+use crate::offers::merkle::{TaggedHash, TlvStream};
use crate::offers::parse::{Bech32Encode, Bolt12ParseError, Bolt12SemanticError, ParsedMessage};
use crate::offers::signer::{Metadata, MetadataMaterial, self};
-use crate::util::ser::{HighZeroBytesDroppedBigSize, WithoutLength, Writeable, Writer};
+use crate::util::ser::{HighZeroBytesDroppedBigSize, Readable, WithoutLength, Writeable, Writer};
use crate::util::string::PrintableString;
+#[cfg(not(c_bindings))]
+use {
+ crate::offers::invoice_request::{DerivedPayerId, ExplicitPayerId, InvoiceRequestBuilder},
+};
+#[cfg(c_bindings)]
+use {
+ crate::offers::invoice_request::{InvoiceRequestWithDerivedPayerIdBuilder, InvoiceRequestWithExplicitPayerIdBuilder},
+};
+
+#[allow(unused_imports)]
use crate::prelude::*;
#[cfg(feature = "std")]
pub(super) const IV_BYTES: &[u8; IV_LEN] = b"LDK Offer ~~~~~~";
+/// An identifier for an [`Offer`] built using [`DerivedMetadata`].
+#[derive(Clone, Copy, Debug, Eq, PartialEq)]
+pub struct OfferId(pub [u8; 32]);
+
+impl OfferId {
+ const ID_TAG: &'static str = "LDK Offer ID";
+
+ fn from_valid_offer_tlv_stream(bytes: &[u8]) -> Self {
+ let tagged_hash = TaggedHash::from_valid_tlv_stream_bytes(Self::ID_TAG, bytes);
+ Self(tagged_hash.to_bytes())
+ }
+
+ fn from_valid_invreq_tlv_stream(bytes: &[u8]) -> Self {
+ let tlv_stream = TlvStream::new(bytes).range(OFFER_TYPES);
+ let tagged_hash = TaggedHash::from_tlv_stream(Self::ID_TAG, tlv_stream);
+ Self(tagged_hash.to_bytes())
+ }
+}
+
+impl Writeable for OfferId {
+ fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
+ self.0.write(w)
+ }
+}
+
+impl Readable for OfferId {
+ fn read<R: io::Read>(r: &mut R) -> Result<Self, DecodeError> {
+ Ok(OfferId(Readable::read(r)?))
+ }
+}
+
/// Builds an [`Offer`] for the "offer to be paid" flow.
///
/// See [module-level documentation] for usage.
secp_ctx: Option<&'a Secp256k1<T>>,
}
+/// Builds an [`Offer`] for the "offer to be paid" flow.
+///
+/// See [module-level documentation] for usage.
+///
+/// [module-level documentation]: self
+#[cfg(c_bindings)]
+pub struct OfferWithExplicitMetadataBuilder<'a> {
+ offer: OfferContents,
+ metadata_strategy: core::marker::PhantomData<ExplicitMetadata>,
+ secp_ctx: Option<&'a Secp256k1<secp256k1::All>>,
+}
+
+/// Builds an [`Offer`] for the "offer to be paid" flow.
+///
+/// See [module-level documentation] for usage.
+///
+/// [module-level documentation]: self
+#[cfg(c_bindings)]
+pub struct OfferWithDerivedMetadataBuilder<'a> {
+ offer: OfferContents,
+ metadata_strategy: core::marker::PhantomData<DerivedMetadata>,
+ secp_ctx: Option<&'a Secp256k1<secp256k1::All>>,
+}
+
/// Indicates how [`Offer::metadata`] may be set.
///
/// This is not exported to bindings users as builder patterns don't map outside of move semantics.
pub struct DerivedMetadata {}
impl MetadataStrategy for ExplicitMetadata {}
+
impl MetadataStrategy for DerivedMetadata {}
-impl<'a> OfferBuilder<'a, ExplicitMetadata, secp256k1::SignOnly> {
+macro_rules! offer_explicit_metadata_builder_methods { (
+ $self: ident, $self_type: ty, $return_type: ty, $return_value: expr
+) => {
/// Creates a new builder for an offer setting the [`Offer::description`] and using the
/// [`Offer::signing_pubkey`] for signing invoices. The associated secret key must be remembered
/// while the offer is valid.
/// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
/// [`ChannelManager::create_offer_builder`]: crate::ln::channelmanager::ChannelManager::create_offer_builder
pub fn new(description: String, signing_pubkey: PublicKey) -> Self {
- OfferBuilder {
+ Self {
offer: OfferContents {
chains: None, metadata: None, amount: None, description,
features: OfferFeatures::empty(), absolute_expiry: None, issuer: None, paths: None,
/// Sets the [`Offer::metadata`] to the given bytes.
///
/// Successive calls to this method will override the previous setting.
- pub fn metadata(mut self, metadata: Vec<u8>) -> Result<Self, Bolt12SemanticError> {
- self.offer.metadata = Some(Metadata::Bytes(metadata));
- Ok(self)
+ pub fn metadata(mut $self: $self_type, metadata: Vec<u8>) -> Result<$return_type, Bolt12SemanticError> {
+ $self.offer.metadata = Some(Metadata::Bytes(metadata));
+ Ok($return_value)
}
-}
+} }
-impl<'a, T: secp256k1::Signing> OfferBuilder<'a, DerivedMetadata, T> {
+macro_rules! offer_derived_metadata_builder_methods { ($secp_context: ty) => {
/// Similar to [`OfferBuilder::new`] except, if [`OfferBuilder::path`] is called, the signing
/// pubkey is derived from the given [`ExpandedKey`] and [`EntropySource`]. This provides
/// recipient privacy by using a different signing pubkey for each offer. Otherwise, the
/// [`ExpandedKey`]: crate::ln::inbound_payment::ExpandedKey
pub fn deriving_signing_pubkey<ES: Deref>(
description: String, node_id: PublicKey, expanded_key: &ExpandedKey, entropy_source: ES,
- secp_ctx: &'a Secp256k1<T>
+ secp_ctx: &'a Secp256k1<$secp_context>
) -> Self where ES::Target: EntropySource {
let nonce = Nonce::from_entropy_source(entropy_source);
let derivation_material = MetadataMaterial::new(nonce, expanded_key, IV_BYTES, None);
let metadata = Metadata::DerivedSigningPubkey(derivation_material);
- OfferBuilder {
+ Self {
offer: OfferContents {
chains: None, metadata: Some(metadata), amount: None, description,
features: OfferFeatures::empty(), absolute_expiry: None, issuer: None, paths: None,
secp_ctx: Some(secp_ctx),
}
}
-}
+} }
-impl<'a, M: MetadataStrategy, T: secp256k1::Signing> OfferBuilder<'a, M, T> {
+macro_rules! offer_builder_methods { (
+ $self: ident, $self_type: ty, $return_type: ty, $return_value: expr $(, $self_mut: tt)?
+) => {
/// Adds the chain hash of the given [`Network`] to [`Offer::chains`]. If not called,
/// the chain hash of [`Network::Bitcoin`] is assumed to be the only one supported.
///
/// See [`Offer::chains`] on how this relates to the payment currency.
///
/// Successive calls to this method will add another chain hash.
- pub fn chain(self, network: Network) -> Self {
- self.chain_hash(ChainHash::using_genesis_block(network))
+ pub fn chain($self: $self_type, network: Network) -> $return_type {
+ $self.chain_hash(ChainHash::using_genesis_block(network))
}
/// Adds the [`ChainHash`] to [`Offer::chains`]. If not called, the chain hash of
/// See [`Offer::chains`] on how this relates to the payment currency.
///
/// Successive calls to this method will add another chain hash.
- pub(crate) fn chain_hash(mut self, chain: ChainHash) -> Self {
- let chains = self.offer.chains.get_or_insert_with(Vec::new);
+ pub(crate) fn chain_hash($($self_mut)* $self: $self_type, chain: ChainHash) -> $return_type {
+ let chains = $self.offer.chains.get_or_insert_with(Vec::new);
if !chains.contains(&chain) {
chains.push(chain);
}
- self
+ $return_value
}
/// Sets the [`Offer::amount`] as an [`Amount::Bitcoin`].
///
/// Successive calls to this method will override the previous setting.
- pub fn amount_msats(self, amount_msats: u64) -> Self {
- self.amount(Amount::Bitcoin { amount_msats })
+ pub fn amount_msats($self: $self_type, amount_msats: u64) -> $return_type {
+ $self.amount(Amount::Bitcoin { amount_msats })
}
/// Sets the [`Offer::amount`].
///
/// Successive calls to this method will override the previous setting.
- pub(super) fn amount(mut self, amount: Amount) -> Self {
- self.offer.amount = Some(amount);
- self
+ pub(super) fn amount($($self_mut)* $self: $self_type, amount: Amount) -> $return_type {
+ $self.offer.amount = Some(amount);
+ $return_value
}
/// Sets the [`Offer::absolute_expiry`] as seconds since the Unix epoch. Any expiry that has
/// already passed is valid and can be checked for using [`Offer::is_expired`].
///
/// Successive calls to this method will override the previous setting.
- pub fn absolute_expiry(mut self, absolute_expiry: Duration) -> Self {
- self.offer.absolute_expiry = Some(absolute_expiry);
- self
+ pub fn absolute_expiry($($self_mut)* $self: $self_type, absolute_expiry: Duration) -> $return_type {
+ $self.offer.absolute_expiry = Some(absolute_expiry);
+ $return_value
}
/// Sets the [`Offer::issuer`].
///
/// Successive calls to this method will override the previous setting.
- pub fn issuer(mut self, issuer: String) -> Self {
- self.offer.issuer = Some(issuer);
- self
+ pub fn issuer($($self_mut)* $self: $self_type, issuer: String) -> $return_type {
+ $self.offer.issuer = Some(issuer);
+ $return_value
}
/// Adds a blinded path to [`Offer::paths`]. Must include at least one path if only connected by
///
/// Successive calls to this method will add another blinded path. Caller is responsible for not
/// adding duplicate paths.
- pub fn path(mut self, path: BlindedPath) -> Self {
- self.offer.paths.get_or_insert_with(Vec::new).push(path);
- self
+ pub fn path($($self_mut)* $self: $self_type, path: BlindedPath) -> $return_type {
+ $self.offer.paths.get_or_insert_with(Vec::new).push(path);
+ $return_value
}
/// Sets the quantity of items for [`Offer::supported_quantity`]. If not called, defaults to
/// [`Quantity::One`].
///
/// Successive calls to this method will override the previous setting.
- pub fn supported_quantity(mut self, quantity: Quantity) -> Self {
- self.offer.supported_quantity = quantity;
- self
+ pub fn supported_quantity($($self_mut)* $self: $self_type, quantity: Quantity) -> $return_type {
+ $self.offer.supported_quantity = quantity;
+ $return_value
}
/// Builds an [`Offer`] from the builder's settings.
- pub fn build(mut self) -> Result<Offer, Bolt12SemanticError> {
- match self.offer.amount {
+ pub fn build($($self_mut)* $self: $self_type) -> Result<Offer, Bolt12SemanticError> {
+ match $self.offer.amount {
Some(Amount::Bitcoin { amount_msats }) => {
if amount_msats > MAX_VALUE_MSAT {
return Err(Bolt12SemanticError::InvalidAmount);
None => {},
}
- if let Some(chains) = &self.offer.chains {
- if chains.len() == 1 && chains[0] == self.offer.implied_chain() {
- self.offer.chains = None;
+ if let Some(chains) = &$self.offer.chains {
+ if chains.len() == 1 && chains[0] == $self.offer.implied_chain() {
+ $self.offer.chains = None;
}
}
- Ok(self.build_without_checks())
+ Ok($self.build_without_checks())
}
- fn build_without_checks(mut self) -> Offer {
+ fn build_without_checks($($self_mut)* $self: $self_type) -> Offer {
// Create the metadata for stateless verification of an InvoiceRequest.
- if let Some(mut metadata) = self.offer.metadata.take() {
+ if let Some(mut metadata) = $self.offer.metadata.take() {
if metadata.has_derivation_material() {
- if self.offer.paths.is_none() {
+ if $self.offer.paths.is_none() {
metadata = metadata.without_keys();
}
- let mut tlv_stream = self.offer.as_tlv_stream();
+ let mut tlv_stream = $self.offer.as_tlv_stream();
debug_assert_eq!(tlv_stream.metadata, None);
tlv_stream.metadata = None;
if metadata.derives_recipient_keys() {
tlv_stream.node_id = None;
}
- let (derived_metadata, keys) = metadata.derive_from(tlv_stream, self.secp_ctx);
+ let (derived_metadata, keys) = metadata.derive_from(tlv_stream, $self.secp_ctx);
metadata = derived_metadata;
if let Some(keys) = keys {
- self.offer.signing_pubkey = keys.public_key();
+ $self.offer.signing_pubkey = keys.public_key();
}
}
- self.offer.metadata = Some(metadata);
+ $self.offer.metadata = Some(metadata);
}
let mut bytes = Vec::new();
- self.offer.write(&mut bytes).unwrap();
+ $self.offer.write(&mut bytes).unwrap();
+
+ let id = OfferId::from_valid_offer_tlv_stream(&bytes);
- Offer { bytes, contents: self.offer }
+ Offer {
+ bytes,
+ #[cfg(not(c_bindings))]
+ contents: $self.offer,
+ #[cfg(c_bindings)]
+ contents: $self.offer.clone(),
+ id,
+ }
}
-}
+} }
#[cfg(test)]
+// Test-only `OfferBuilder` setters, macro-generated so the same methods can be
+// emitted for both the by-value builder (receiver `self`, returns `Self`) and
+// the c_bindings by-reference builder (receiver `&mut self`, returns `&mut Self`).
+// The macro parameters select the receiver form, return type, and return value.
+macro_rules! offer_builder_test_methods { (
+	$self: ident, $self_type: ty, $return_type: ty, $return_value: expr $(, $self_mut: tt)?
+) => {
+	// Sets `Offer::features` without any validity checks.
+	#[cfg_attr(c_bindings, allow(dead_code))]
+	fn features_unchecked($($self_mut)* $self: $self_type, features: OfferFeatures) -> $return_type {
+		$self.offer.features = features;
+		$return_value
+	}
+
+	// Removes any chains previously added to the offer.
+	#[cfg_attr(c_bindings, allow(dead_code))]
+	pub(crate) fn clear_chains($($self_mut)* $self: $self_type) -> $return_type {
+		$self.offer.chains = None;
+		$return_value
+	}
+
+	// Removes any blinded paths previously added to the offer.
+	#[cfg_attr(c_bindings, allow(dead_code))]
+	pub(crate) fn clear_paths($($self_mut)* $self: $self_type) -> $return_type {
+		$self.offer.paths = None;
+		$return_value
+	}
+
+	// Builds the offer, skipping the semantic checks performed by `build`.
+	#[cfg_attr(c_bindings, allow(dead_code))]
+	pub(super) fn build_unchecked($self: $self_type) -> Offer {
+		$self.build_without_checks()
+	}
+} }
+
impl<'a, M: MetadataStrategy, T: secp256k1::Signing> OfferBuilder<'a, M, T> {
- fn features_unchecked(mut self, features: OfferFeatures) -> Self {
- self.offer.features = features;
- self
+ offer_builder_methods!(self, Self, Self, self, mut);
+
+ #[cfg(test)]
+ offer_builder_test_methods!(self, Self, Self, self, mut);
+}
+
+// Rust-facing builder with explicit metadata: sign-only context suffices since
+// no keys are derived.
+impl<'a> OfferBuilder<'a, ExplicitMetadata, secp256k1::SignOnly> {
+	offer_explicit_metadata_builder_methods!(self, Self, Self, self);
+}
+
+// Rust-facing builder that derives the signing pubkey from the metadata.
+impl<'a, T: secp256k1::Signing> OfferBuilder<'a, DerivedMetadata, T> {
+	offer_derived_metadata_builder_methods!(T);
+}
+
+// c_bindings builders are non-generic and take the receiver by reference.
+// Outside of tests the setters return `()`; in tests they return `&mut Self`
+// so test code can chain calls.
+#[cfg(all(c_bindings, not(test)))]
+impl<'a> OfferWithExplicitMetadataBuilder<'a> {
+	offer_explicit_metadata_builder_methods!(self, &mut Self, (), ());
+	offer_builder_methods!(self, &mut Self, (), ());
+}
+
+#[cfg(all(c_bindings, test))]
+impl<'a> OfferWithExplicitMetadataBuilder<'a> {
+	offer_explicit_metadata_builder_methods!(self, &mut Self, &mut Self, self);
+	offer_builder_methods!(self, &mut Self, &mut Self, self);
+	offer_builder_test_methods!(self, &mut Self, &mut Self, self);
+}
+
+#[cfg(all(c_bindings, not(test)))]
+impl<'a> OfferWithDerivedMetadataBuilder<'a> {
+	offer_derived_metadata_builder_methods!(secp256k1::All);
+	offer_builder_methods!(self, &mut Self, (), ());
+}
+
+#[cfg(all(c_bindings, test))]
+impl<'a> OfferWithDerivedMetadataBuilder<'a> {
+	offer_derived_metadata_builder_methods!(secp256k1::All);
+	offer_builder_methods!(self, &mut Self, &mut Self, self);
+	offer_builder_test_methods!(self, &mut Self, &mut Self, self);
+}
+
+#[cfg(c_bindings)]
+impl<'a> From<OfferBuilder<'a, DerivedMetadata, secp256k1::All>>
+for OfferWithDerivedMetadataBuilder<'a> {
+ fn from(builder: OfferBuilder<'a, DerivedMetadata, secp256k1::All>) -> Self {
+ let OfferBuilder { offer, metadata_strategy, secp_ctx } = builder;
+
+ Self { offer, metadata_strategy, secp_ctx }
}
+}
- pub(super) fn build_unchecked(self) -> Offer {
- self.build_without_checks()
+#[cfg(c_bindings)]
+impl<'a> From<OfferWithDerivedMetadataBuilder<'a>>
+for OfferBuilder<'a, DerivedMetadata, secp256k1::All> {
+ fn from(builder: OfferWithDerivedMetadataBuilder<'a>) -> Self {
+ let OfferWithDerivedMetadataBuilder { offer, metadata_strategy, secp_ctx } = builder;
+
+ Self { offer, metadata_strategy, secp_ctx }
}
}
/// [`InvoiceRequest`]: crate::offers::invoice_request::InvoiceRequest
/// [`Bolt12Invoice`]: crate::offers::invoice::Bolt12Invoice
#[derive(Clone, Debug)]
-#[cfg_attr(test, derive(PartialEq))]
pub struct Offer {
// The serialized offer. Needed when creating an `InvoiceRequest` if the offer contains unknown
// fields.
pub(super) bytes: Vec<u8>,
pub(super) contents: OfferContents,
+ id: OfferId,
}
/// The contents of an [`Offer`], which may be shared with an [`InvoiceRequest`] or a
impl Offer {
offer_accessors!(self, self.contents);
+ /// Returns the id of the offer.
+ pub fn id(&self) -> OfferId {
+ self.id
+ }
+
pub(super) fn implied_chain(&self) -> ChainHash {
self.contents.implied_chain()
}
pub fn expects_quantity(&self) -> bool {
self.contents.expects_quantity()
}
+}
+macro_rules! request_invoice_derived_payer_id { ($self: ident, $builder: ty) => {
/// Similar to [`Offer::request_invoice`] except it:
/// - derives the [`InvoiceRequest::payer_id`] such that a different key can be used for each
/// request,
///
/// Useful to protect the sender's privacy.
///
- /// This is not exported to bindings users as builder patterns don't map outside of move semantics.
- ///
/// [`InvoiceRequest::payer_id`]: crate::offers::invoice_request::InvoiceRequest::payer_id
/// [`InvoiceRequest::payer_metadata`]: crate::offers::invoice_request::InvoiceRequest::payer_metadata
/// [`Bolt12Invoice::verify`]: crate::offers::invoice::Bolt12Invoice::verify
/// [`ExpandedKey`]: crate::ln::inbound_payment::ExpandedKey
- pub fn request_invoice_deriving_payer_id<'a, 'b, ES: Deref, T: secp256k1::Signing>(
- &'a self, expanded_key: &ExpandedKey, entropy_source: ES, secp_ctx: &'b Secp256k1<T>,
+ pub fn request_invoice_deriving_payer_id<
+ 'a, 'b, ES: Deref,
+ #[cfg(not(c_bindings))]
+ T: secp256k1::Signing
+ >(
+ &'a $self, expanded_key: &ExpandedKey, entropy_source: ES,
+ #[cfg(not(c_bindings))]
+ secp_ctx: &'b Secp256k1<T>,
+ #[cfg(c_bindings)]
+ secp_ctx: &'b Secp256k1<secp256k1::All>,
payment_id: PaymentId
- ) -> Result<InvoiceRequestBuilder<'a, 'b, DerivedPayerId, T>, Bolt12SemanticError>
+ ) -> Result<$builder, Bolt12SemanticError>
where
ES::Target: EntropySource,
{
- if self.offer_features().requires_unknown_bits() {
+ if $self.offer_features().requires_unknown_bits() {
return Err(Bolt12SemanticError::UnknownRequiredFeatures);
}
- Ok(InvoiceRequestBuilder::deriving_payer_id(
- self, expanded_key, entropy_source, secp_ctx, payment_id
- ))
+ Ok(<$builder>::deriving_payer_id($self, expanded_key, entropy_source, secp_ctx, payment_id))
}
+} }
+macro_rules! request_invoice_explicit_payer_id { ($self: ident, $builder: ty) => {
/// Similar to [`Offer::request_invoice_deriving_payer_id`] except uses `payer_id` for the
/// [`InvoiceRequest::payer_id`] instead of deriving a different key for each request.
///
/// Useful for recurring payments using the same `payer_id` with different invoices.
///
- /// This is not exported to bindings users as builder patterns don't map outside of move semantics.
- ///
/// [`InvoiceRequest::payer_id`]: crate::offers::invoice_request::InvoiceRequest::payer_id
pub fn request_invoice_deriving_metadata<ES: Deref>(
- &self, payer_id: PublicKey, expanded_key: &ExpandedKey, entropy_source: ES,
+ &$self, payer_id: PublicKey, expanded_key: &ExpandedKey, entropy_source: ES,
payment_id: PaymentId
- ) -> Result<InvoiceRequestBuilder<ExplicitPayerId, secp256k1::SignOnly>, Bolt12SemanticError>
+ ) -> Result<$builder, Bolt12SemanticError>
where
ES::Target: EntropySource,
{
- if self.offer_features().requires_unknown_bits() {
+ if $self.offer_features().requires_unknown_bits() {
return Err(Bolt12SemanticError::UnknownRequiredFeatures);
}
- Ok(InvoiceRequestBuilder::deriving_metadata(
- self, payer_id, expanded_key, entropy_source, payment_id
- ))
+ Ok(<$builder>::deriving_metadata($self, payer_id, expanded_key, entropy_source, payment_id))
}
/// Creates an [`InvoiceRequestBuilder`] for the offer with the given `metadata` and `payer_id`,
///
/// Errors if the offer contains unknown required features.
///
- /// This is not exported to bindings users as builder patterns don't map outside of move semantics.
- ///
/// [`InvoiceRequest`]: crate::offers::invoice_request::InvoiceRequest
pub fn request_invoice(
- &self, metadata: Vec<u8>, payer_id: PublicKey
- ) -> Result<InvoiceRequestBuilder<ExplicitPayerId, secp256k1::SignOnly>, Bolt12SemanticError> {
- if self.offer_features().requires_unknown_bits() {
+ &$self, metadata: Vec<u8>, payer_id: PublicKey
+ ) -> Result<$builder, Bolt12SemanticError> {
+ if $self.offer_features().requires_unknown_bits() {
return Err(Bolt12SemanticError::UnknownRequiredFeatures);
}
- Ok(InvoiceRequestBuilder::new(self, metadata, payer_id))
+ Ok(<$builder>::new($self, metadata, payer_id))
}
+} }
- #[cfg(test)]
+// Without c_bindings, invoice requests use the generic `InvoiceRequestBuilder`;
+// with c_bindings, concrete non-generic builder types are exposed instead.
+#[cfg(not(c_bindings))]
+impl Offer {
+	request_invoice_derived_payer_id!(self, InvoiceRequestBuilder<'a, 'b, DerivedPayerId, T>);
+	request_invoice_explicit_payer_id!(self, InvoiceRequestBuilder<ExplicitPayerId, secp256k1::SignOnly>);
+}
+
+#[cfg(c_bindings)]
+impl Offer {
+	request_invoice_derived_payer_id!(self, InvoiceRequestWithDerivedPayerIdBuilder<'a, 'b>);
+	request_invoice_explicit_payer_id!(self, InvoiceRequestWithExplicitPayerIdBuilder);
+}
+
+#[cfg(test)]
+impl Offer {
pub(super) fn as_tlv_stream(&self) -> OfferTlvStreamRef {
self.contents.as_tlv_stream()
}
}
}
+// Equality is defined on the serialized TLV bytes alone: `bytes` encodes the
+// full offer (including any unknown TLV fields), and both `contents` and `id`
+// are derived from it when the offer is built or parsed.
+impl PartialEq for Offer {
+	fn eq(&self, other: &Self) -> bool {
+		self.bytes.eq(&other.bytes)
+	}
+}
+
+impl Eq for Offer {}
+
+// Hash must agree with `PartialEq`, so it likewise covers only `bytes`.
+impl Hash for Offer {
+	fn hash<H: Hasher>(&self, state: &mut H) {
+		self.bytes.hash(state);
+	}
+}
+
impl OfferContents {
pub fn chains(&self) -> Vec<ChainHash> {
self.chains.as_ref().cloned().unwrap_or_else(|| vec![self.implied_chain()])
/// Verifies that the offer metadata was produced from the offer in the TLV stream.
pub(super) fn verify<T: secp256k1::Signing>(
&self, bytes: &[u8], key: &ExpandedKey, secp_ctx: &Secp256k1<T>
- ) -> Result<Option<KeyPair>, ()> {
+ ) -> Result<(OfferId, Option<KeyPair>), ()> {
match self.metadata() {
Some(metadata) => {
let tlv_stream = TlvStream::new(bytes).range(OFFER_TYPES).filter(|record| {
_ => true,
}
});
- signer::verify_recipient_metadata(
+ let keys = signer::verify_recipient_metadata(
metadata, key, IV_BYTES, self.signing_pubkey(), tlv_stream, secp_ctx
- )
+ )?;
+
+ let offer_id = OfferId::from_valid_invreq_tlv_stream(bytes);
+
+ Ok((offer_id, keys))
},
None => Err(()),
}
let offer = ParsedMessage::<OfferTlvStream>::try_from(bytes)?;
let ParsedMessage { bytes, tlv_stream } = offer;
let contents = OfferContents::try_from(tlv_stream)?;
- Ok(Offer { bytes, contents })
+ let id = OfferId::from_valid_offer_tlv_stream(&bytes);
+
+ Ok(Offer { bytes, contents, id })
}
}
#[cfg(test)]
mod tests {
- use super::{Amount, Offer, OfferBuilder, OfferTlvStreamRef, Quantity};
+ use super::{Amount, Offer, OfferTlvStreamRef, Quantity};
+ #[cfg(not(c_bindings))]
+ use {
+ super::OfferBuilder,
+ };
+ #[cfg(c_bindings)]
+ use {
+ super::OfferWithExplicitMetadataBuilder as OfferBuilder,
+ };
use bitcoin::blockdata::constants::ChainHash;
use bitcoin::network::constants::Network;
use bitcoin::secp256k1::Secp256k1;
- use core::convert::TryFrom;
use core::num::NonZeroU64;
use core::time::Duration;
- use crate::blinded_path::{BlindedHop, BlindedPath};
+ use crate::blinded_path::{BlindedHop, BlindedPath, IntroductionNode};
use crate::sign::KeyMaterial;
use crate::ln::features::OfferFeatures;
use crate::ln::inbound_payment::ExpandedKey;
let entropy = FixedEntropy {};
let secp_ctx = Secp256k1::new();
+ #[cfg(c_bindings)]
+ use super::OfferWithDerivedMetadataBuilder as OfferBuilder;
let offer = OfferBuilder
::deriving_signing_pubkey(desc, node_id, &expanded_key, &entropy, &secp_ctx)
.amount_msats(1000)
let invoice_request = offer.request_invoice(vec![1; 32], payer_pubkey()).unwrap()
.build().unwrap()
.sign(payer_sign).unwrap();
- assert!(invoice_request.verify(&expanded_key, &secp_ctx).is_ok());
+ match invoice_request.verify(&expanded_key, &secp_ctx) {
+ Ok(invoice_request) => assert_eq!(invoice_request.offer_id, offer.id()),
+ Err(_) => panic!("unexpected error"),
+ }
// Fails verification with altered offer field
let mut tlv_stream = offer.as_tlv_stream();
let secp_ctx = Secp256k1::new();
let blinded_path = BlindedPath {
- introduction_node_id: pubkey(40),
+ introduction_node: IntroductionNode::NodeId(pubkey(40)),
blinding_point: pubkey(41),
blinded_hops: vec![
BlindedHop { blinded_node_id: pubkey(42), encrypted_payload: vec![0; 43] },
],
};
+ #[cfg(c_bindings)]
+ use super::OfferWithDerivedMetadataBuilder as OfferBuilder;
let offer = OfferBuilder
::deriving_signing_pubkey(desc, node_id, &expanded_key, &entropy, &secp_ctx)
.amount_msats(1000)
let invoice_request = offer.request_invoice(vec![1; 32], payer_pubkey()).unwrap()
.build().unwrap()
.sign(payer_sign).unwrap();
- assert!(invoice_request.verify(&expanded_key, &secp_ctx).is_ok());
+ match invoice_request.verify(&expanded_key, &secp_ctx) {
+ Ok(invoice_request) => assert_eq!(invoice_request.offer_id, offer.id()),
+ Err(_) => panic!("unexpected error"),
+ }
// Fails verification with altered offer field
let mut tlv_stream = offer.as_tlv_stream();
assert_eq!(tlv_stream.amount, Some(1000));
assert_eq!(tlv_stream.currency, None);
+ #[cfg(not(c_bindings))]
let builder = OfferBuilder::new("foo".into(), pubkey(42))
.amount(currency_amount.clone());
+ #[cfg(c_bindings)]
+ let mut builder = OfferBuilder::new("foo".into(), pubkey(42));
+ #[cfg(c_bindings)]
+ builder.amount(currency_amount.clone());
let tlv_stream = builder.offer.as_tlv_stream();
assert_eq!(builder.offer.amount, Some(currency_amount.clone()));
assert_eq!(tlv_stream.amount, Some(10));
fn builds_offer_with_paths() {
let paths = vec![
BlindedPath {
- introduction_node_id: pubkey(40),
+ introduction_node: IntroductionNode::NodeId(pubkey(40)),
blinding_point: pubkey(41),
blinded_hops: vec![
BlindedHop { blinded_node_id: pubkey(43), encrypted_payload: vec![0; 43] },
],
},
BlindedPath {
- introduction_node_id: pubkey(40),
+ introduction_node: IntroductionNode::NodeId(pubkey(40)),
blinding_point: pubkey(41),
blinded_hops: vec![
BlindedHop { blinded_node_id: pubkey(45), encrypted_payload: vec![0; 45] },
fn parses_offer_with_paths() {
let offer = OfferBuilder::new("foo".into(), pubkey(42))
.path(BlindedPath {
- introduction_node_id: pubkey(40),
+ introduction_node: IntroductionNode::NodeId(pubkey(40)),
blinding_point: pubkey(41),
blinded_hops: vec![
BlindedHop { blinded_node_id: pubkey(43), encrypted_payload: vec![0; 43] },
],
})
.path(BlindedPath {
- introduction_node_id: pubkey(40),
+ introduction_node: IntroductionNode::NodeId(pubkey(40)),
blinding_point: pubkey(41),
blinded_hops: vec![
BlindedHop { blinded_node_id: pubkey(45), encrypted_payload: vec![0; 45] },
use bitcoin::bech32;
use bitcoin::secp256k1;
-use core::convert::TryFrom;
use crate::io;
use crate::ln::msgs::DecodeError;
use crate::util::ser::SeekReadable;
+#[allow(unused_imports)]
use crate::prelude::*;
#[cfg(not(fuzzing))]
mod sealed {
use bitcoin::bech32;
use bitcoin::bech32::{FromBase32, ToBase32};
- use core::convert::TryFrom;
use core::fmt;
use super::Bolt12ParseError;
+ #[allow(unused_imports)]
use crate::prelude::*;
/// Indicates a message can be encoded using bech32.
use crate::offers::signer::Metadata;
use crate::util::ser::WithoutLength;
+#[allow(unused_imports)]
use crate::prelude::*;
/// An unpredictable sequence of bytes typically containing information needed to derive
use bitcoin::blockdata::constants::ChainHash;
use bitcoin::network::constants::Network;
use bitcoin::secp256k1::{PublicKey, Secp256k1, self};
-use core::convert::TryFrom;
+use core::hash::{Hash, Hasher};
use core::ops::Deref;
use core::str::FromStr;
use core::time::Duration;
use crate::ln::features::InvoiceRequestFeatures;
use crate::ln::inbound_payment::{ExpandedKey, IV_LEN, Nonce};
use crate::ln::msgs::{DecodeError, MAX_VALUE_MSAT};
-use crate::offers::invoice::{BlindedPayInfo, DerivedSigningPubkey, ExplicitSigningPubkey, InvoiceBuilder};
+use crate::offers::invoice::BlindedPayInfo;
use crate::offers::invoice_request::{InvoiceRequestTlvStream, InvoiceRequestTlvStreamRef};
use crate::offers::offer::{OfferTlvStream, OfferTlvStreamRef};
use crate::offers::parse::{Bech32Encode, Bolt12ParseError, Bolt12SemanticError, ParsedMessage};
use crate::util::ser::{SeekReadable, WithoutLength, Writeable, Writer};
use crate::util::string::PrintableString;
+#[cfg(not(c_bindings))]
+use {
+ crate::offers::invoice::{DerivedSigningPubkey, ExplicitSigningPubkey, InvoiceBuilder},
+};
+#[cfg(c_bindings)]
+use {
+ crate::offers::invoice::{InvoiceWithDerivedSigningPubkeyBuilder, InvoiceWithExplicitSigningPubkeyBuilder},
+};
+
+#[allow(unused_imports)]
use crate::prelude::*;
#[cfg(feature = "std")]
secp_ctx: Option<&'a Secp256k1<T>>,
}
-impl<'a> RefundBuilder<'a, secp256k1::SignOnly> {
+/// Builds a [`Refund`] for the "offer for money" flow.
+///
+/// See [module-level documentation] for usage.
+///
+/// [module-level documentation]: self
+#[cfg(c_bindings)]
+pub struct RefundMaybeWithDerivedMetadataBuilder<'a> {
+	// The refund contents accumulated by the builder's setter methods.
+	refund: RefundContents,
+	// Context used when deriving the payer id; `None` when the builder was
+	// created with explicit metadata.
+	secp_ctx: Option<&'a Secp256k1<secp256k1::All>>,
+}
+
+macro_rules! refund_explicit_metadata_builder_methods { () => {
/// Creates a new builder for a refund using the [`Refund::payer_id`] for the public node id to
/// send to if no [`Refund::paths`] are set. Otherwise, it may be a transient pubkey.
///
secp_ctx: None,
})
}
-}
+} }
-impl<'a, T: secp256k1::Signing> RefundBuilder<'a, T> {
+macro_rules! refund_builder_methods { (
+ $self: ident, $self_type: ty, $return_type: ty, $return_value: expr, $secp_context: ty $(, $self_mut: tt)?
+) => {
/// Similar to [`RefundBuilder::new`] except, if [`RefundBuilder::path`] is called, the payer id
/// is derived from the given [`ExpandedKey`] and nonce. This provides sender privacy by using a
/// different payer id for each refund, assuming a different nonce is used. Otherwise, the
/// [`ExpandedKey`]: crate::ln::inbound_payment::ExpandedKey
pub fn deriving_payer_id<ES: Deref>(
description: String, node_id: PublicKey, expanded_key: &ExpandedKey, entropy_source: ES,
- secp_ctx: &'a Secp256k1<T>, amount_msats: u64, payment_id: PaymentId
+ secp_ctx: &'a Secp256k1<$secp_context>, amount_msats: u64, payment_id: PaymentId
) -> Result<Self, Bolt12SemanticError> where ES::Target: EntropySource {
if amount_msats > MAX_VALUE_MSAT {
return Err(Bolt12SemanticError::InvalidAmount);
/// already passed is valid and can be checked for using [`Refund::is_expired`].
///
/// Successive calls to this method will override the previous setting.
- pub fn absolute_expiry(mut self, absolute_expiry: Duration) -> Self {
- self.refund.absolute_expiry = Some(absolute_expiry);
- self
+ pub fn absolute_expiry($($self_mut)* $self: $self_type, absolute_expiry: Duration) -> $return_type {
+ $self.refund.absolute_expiry = Some(absolute_expiry);
+ $return_value
}
/// Sets the [`Refund::issuer`].
///
/// Successive calls to this method will override the previous setting.
- pub fn issuer(mut self, issuer: String) -> Self {
- self.refund.issuer = Some(issuer);
- self
+ pub fn issuer($($self_mut)* $self: $self_type, issuer: String) -> $return_type {
+ $self.refund.issuer = Some(issuer);
+ $return_value
}
/// Adds a blinded path to [`Refund::paths`]. Must include at least one path if only connected
///
/// Successive calls to this method will add another blinded path. Caller is responsible for not
/// adding duplicate paths.
- pub fn path(mut self, path: BlindedPath) -> Self {
- self.refund.paths.get_or_insert_with(Vec::new).push(path);
- self
+ pub fn path($($self_mut)* $self: $self_type, path: BlindedPath) -> $return_type {
+ $self.refund.paths.get_or_insert_with(Vec::new).push(path);
+ $return_value
}
/// Sets the [`Refund::chain`] of the given [`Network`] for paying an invoice. If not
/// called, [`Network::Bitcoin`] is assumed.
///
/// Successive calls to this method will override the previous setting.
- pub fn chain(self, network: Network) -> Self {
- self.chain_hash(ChainHash::using_genesis_block(network))
+ pub fn chain($self: $self_type, network: Network) -> $return_type {
+ $self.chain_hash(ChainHash::using_genesis_block(network))
}
/// Sets the [`Refund::chain`] of the given [`ChainHash`] for paying an invoice. If not called,
/// [`Network::Bitcoin`] is assumed.
///
/// Successive calls to this method will override the previous setting.
- pub(crate) fn chain_hash(mut self, chain: ChainHash) -> Self {
- self.refund.chain = Some(chain);
- self
+ pub(crate) fn chain_hash($($self_mut)* $self: $self_type, chain: ChainHash) -> $return_type {
+ $self.refund.chain = Some(chain);
+ $return_value
}
/// Sets [`Refund::quantity`] of items. This is purely for informational purposes. It is useful
/// [`Bolt12Invoice`]: crate::offers::invoice::Bolt12Invoice
/// [`InvoiceRequest::quantity`]: crate::offers::invoice_request::InvoiceRequest::quantity
/// [`Offer`]: crate::offers::offer::Offer
- pub fn quantity(mut self, quantity: u64) -> Self {
- self.refund.quantity = Some(quantity);
- self
+ pub fn quantity($($self_mut)* $self: $self_type, quantity: u64) -> $return_type {
+ $self.refund.quantity = Some(quantity);
+ $return_value
}
/// Sets the [`Refund::payer_note`].
///
/// Successive calls to this method will override the previous setting.
- pub fn payer_note(mut self, payer_note: String) -> Self {
- self.refund.payer_note = Some(payer_note);
- self
+ pub fn payer_note($($self_mut)* $self: $self_type, payer_note: String) -> $return_type {
+ $self.refund.payer_note = Some(payer_note);
+ $return_value
}
/// Builds a [`Refund`] after checking for valid semantics.
- pub fn build(mut self) -> Result<Refund, Bolt12SemanticError> {
- if self.refund.chain() == self.refund.implied_chain() {
- self.refund.chain = None;
+ pub fn build($($self_mut)* $self: $self_type) -> Result<Refund, Bolt12SemanticError> {
+ if $self.refund.chain() == $self.refund.implied_chain() {
+ $self.refund.chain = None;
}
// Create the metadata for stateless verification of a Bolt12Invoice.
- if self.refund.payer.0.has_derivation_material() {
- let mut metadata = core::mem::take(&mut self.refund.payer.0);
+ if $self.refund.payer.0.has_derivation_material() {
+ let mut metadata = core::mem::take(&mut $self.refund.payer.0);
- if self.refund.paths.is_none() {
+ if $self.refund.paths.is_none() {
metadata = metadata.without_keys();
}
- let mut tlv_stream = self.refund.as_tlv_stream();
+ let mut tlv_stream = $self.refund.as_tlv_stream();
tlv_stream.0.metadata = None;
if metadata.derives_payer_keys() {
tlv_stream.2.payer_id = None;
}
- let (derived_metadata, keys) = metadata.derive_from(tlv_stream, self.secp_ctx);
+ let (derived_metadata, keys) = metadata.derive_from(tlv_stream, $self.secp_ctx);
metadata = derived_metadata;
if let Some(keys) = keys {
- self.refund.payer_id = keys.public_key();
+ $self.refund.payer_id = keys.public_key();
}
- self.refund.payer.0 = metadata;
+ $self.refund.payer.0 = metadata;
}
let mut bytes = Vec::new();
- self.refund.write(&mut bytes).unwrap();
+ $self.refund.write(&mut bytes).unwrap();
+
+ Ok(Refund {
+ bytes,
+ #[cfg(not(c_bindings))]
+ contents: $self.refund,
+ #[cfg(c_bindings)]
+ contents: $self.refund.clone(),
+ })
+ }
+} }
- Ok(Refund { bytes, contents: self.refund })
+// Test-only `RefundBuilder` setters, macro-generated so the same methods can be
+// emitted for both the by-value builder (returns `Self`) and the c_bindings
+// by-reference builder (returns `&mut Self`).
+#[cfg(test)]
+macro_rules! refund_builder_test_methods { (
+	$self: ident, $self_type: ty, $return_type: ty, $return_value: expr $(, $self_mut: tt)?
+) => {
+	// Removes any blinded paths previously added to the refund.
+	#[cfg_attr(c_bindings, allow(dead_code))]
+	pub(crate) fn clear_paths($($self_mut)* $self: $self_type) -> $return_type {
+		$self.refund.paths = None;
+		$return_value
+	}
+
+	// Sets `Refund::features` without any validity checks.
+	#[cfg_attr(c_bindings, allow(dead_code))]
+	fn features_unchecked($($self_mut)* $self: $self_type, features: InvoiceRequestFeatures) -> $return_type {
+		$self.refund.features = features;
+		$return_value
+	}
+} }
+
+impl<'a> RefundBuilder<'a, secp256k1::SignOnly> {
+ refund_explicit_metadata_builder_methods!();
}
-#[cfg(test)]
impl<'a, T: secp256k1::Signing> RefundBuilder<'a, T> {
- fn features_unchecked(mut self, features: InvoiceRequestFeatures) -> Self {
- self.refund.features = features;
- self
+ refund_builder_methods!(self, Self, Self, self, T, mut);
+
+ #[cfg(test)]
+ refund_builder_test_methods!(self, Self, Self, self, mut);
+}
+
+// c_bindings refund builder: non-generic, receiver taken by reference. Outside
+// of tests the setters return `()`; in tests they return `&mut Self` so test
+// code can chain calls.
+#[cfg(all(c_bindings, not(test)))]
+impl<'a> RefundMaybeWithDerivedMetadataBuilder<'a> {
+	refund_explicit_metadata_builder_methods!();
+	refund_builder_methods!(self, &mut Self, (), (), secp256k1::All);
+}
+
+#[cfg(all(c_bindings, test))]
+impl<'a> RefundMaybeWithDerivedMetadataBuilder<'a> {
+	refund_explicit_metadata_builder_methods!();
+	refund_builder_methods!(self, &mut Self, &mut Self, self, secp256k1::All);
+	refund_builder_test_methods!(self, &mut Self, &mut Self, self);
+}
+
+// Conversion from the Rust-generic `RefundBuilder` to the c_bindings builder.
+// Both hold the same fields, so this is a straight field-by-field move.
+#[cfg(c_bindings)]
+impl<'a> From<RefundBuilder<'a, secp256k1::All>>
+for RefundMaybeWithDerivedMetadataBuilder<'a> {
+	fn from(builder: RefundBuilder<'a, secp256k1::All>) -> Self {
+		let RefundBuilder { refund, secp_ctx } = builder;
+
+		Self { refund, secp_ctx }
+	}
+}
+
+#[cfg(c_bindings)]
+impl<'a> From<RefundMaybeWithDerivedMetadataBuilder<'a>>
+for RefundBuilder<'a, secp256k1::All> {
+ fn from(builder: RefundMaybeWithDerivedMetadataBuilder<'a>) -> Self {
+ let RefundMaybeWithDerivedMetadataBuilder { refund, secp_ctx } = builder;
+
+ Self { refund, secp_ctx }
}
}
/// [`Bolt12Invoice`]: crate::offers::invoice::Bolt12Invoice
/// [`Offer`]: crate::offers::offer::Offer
#[derive(Clone, Debug)]
-#[cfg_attr(test, derive(PartialEq))]
pub struct Refund {
pub(super) bytes: Vec<u8>,
pub(super) contents: RefundContents,
pub fn payer_note(&self) -> Option<PrintableString> {
self.contents.payer_note()
}
+}
+macro_rules! respond_with_explicit_signing_pubkey_methods { ($self: ident, $builder: ty) => {
/// Creates an [`InvoiceBuilder`] for the refund with the given required fields and using the
/// [`Duration`] since [`std::time::SystemTime::UNIX_EPOCH`] as the creation time.
///
/// [`Duration`]: core::time::Duration
#[cfg(feature = "std")]
pub fn respond_with(
- &self, payment_paths: Vec<(BlindedPayInfo, BlindedPath)>, payment_hash: PaymentHash,
+ &$self, payment_paths: Vec<(BlindedPayInfo, BlindedPath)>, payment_hash: PaymentHash,
signing_pubkey: PublicKey,
- ) -> Result<InvoiceBuilder<ExplicitSigningPubkey>, Bolt12SemanticError> {
+ ) -> Result<$builder, Bolt12SemanticError> {
let created_at = std::time::SystemTime::now()
.duration_since(std::time::SystemTime::UNIX_EPOCH)
.expect("SystemTime::now() should come after SystemTime::UNIX_EPOCH");
- self.respond_with_no_std(payment_paths, payment_hash, signing_pubkey, created_at)
+ $self.respond_with_no_std(payment_paths, payment_hash, signing_pubkey, created_at)
}
/// Creates an [`InvoiceBuilder`] for the refund with the given required fields.
///
/// [`Bolt12Invoice::created_at`]: crate::offers::invoice::Bolt12Invoice::created_at
pub fn respond_with_no_std(
- &self, payment_paths: Vec<(BlindedPayInfo, BlindedPath)>, payment_hash: PaymentHash,
+ &$self, payment_paths: Vec<(BlindedPayInfo, BlindedPath)>, payment_hash: PaymentHash,
signing_pubkey: PublicKey, created_at: Duration
- ) -> Result<InvoiceBuilder<ExplicitSigningPubkey>, Bolt12SemanticError> {
- if self.features().requires_unknown_bits() {
+ ) -> Result<$builder, Bolt12SemanticError> {
+ if $self.features().requires_unknown_bits() {
return Err(Bolt12SemanticError::UnknownRequiredFeatures);
}
- InvoiceBuilder::for_refund(self, payment_paths, created_at, payment_hash, signing_pubkey)
+ <$builder>::for_refund($self, payment_paths, created_at, payment_hash, signing_pubkey)
}
+} }
+macro_rules! respond_with_derived_signing_pubkey_methods { ($self: ident, $builder: ty) => {
/// Creates an [`InvoiceBuilder`] for the refund using the given required fields and that uses
/// derived signing keys to sign the [`Bolt12Invoice`].
///
/// [`Bolt12Invoice`]: crate::offers::invoice::Bolt12Invoice
#[cfg(feature = "std")]
pub fn respond_using_derived_keys<ES: Deref>(
- &self, payment_paths: Vec<(BlindedPayInfo, BlindedPath)>, payment_hash: PaymentHash,
+ &$self, payment_paths: Vec<(BlindedPayInfo, BlindedPath)>, payment_hash: PaymentHash,
expanded_key: &ExpandedKey, entropy_source: ES
- ) -> Result<InvoiceBuilder<DerivedSigningPubkey>, Bolt12SemanticError>
+ ) -> Result<$builder, Bolt12SemanticError>
where
ES::Target: EntropySource,
{
.duration_since(std::time::SystemTime::UNIX_EPOCH)
.expect("SystemTime::now() should come after SystemTime::UNIX_EPOCH");
- self.respond_using_derived_keys_no_std(
+ $self.respond_using_derived_keys_no_std(
payment_paths, payment_hash, created_at, expanded_key, entropy_source
)
}
///
/// [`Bolt12Invoice`]: crate::offers::invoice::Bolt12Invoice
pub fn respond_using_derived_keys_no_std<ES: Deref>(
- &self, payment_paths: Vec<(BlindedPayInfo, BlindedPath)>, payment_hash: PaymentHash,
+ &$self, payment_paths: Vec<(BlindedPayInfo, BlindedPath)>, payment_hash: PaymentHash,
created_at: core::time::Duration, expanded_key: &ExpandedKey, entropy_source: ES
- ) -> Result<InvoiceBuilder<DerivedSigningPubkey>, Bolt12SemanticError>
+ ) -> Result<$builder, Bolt12SemanticError>
where
ES::Target: EntropySource,
{
- if self.features().requires_unknown_bits() {
+ if $self.features().requires_unknown_bits() {
return Err(Bolt12SemanticError::UnknownRequiredFeatures);
}
let nonce = Nonce::from_entropy_source(entropy_source);
let keys = signer::derive_keys(nonce, expanded_key);
- InvoiceBuilder::for_refund_using_keys(self, payment_paths, created_at, payment_hash, keys)
+ <$builder>::for_refund_using_keys($self, payment_paths, created_at, payment_hash, keys)
}
+} }
- #[cfg(test)]
+#[cfg(not(c_bindings))]
+impl Refund {
+ respond_with_explicit_signing_pubkey_methods!(self, InvoiceBuilder<ExplicitSigningPubkey>);
+ respond_with_derived_signing_pubkey_methods!(self, InvoiceBuilder<DerivedSigningPubkey>);
+}
+
+#[cfg(c_bindings)]
+impl Refund {
+ respond_with_explicit_signing_pubkey_methods!(self, InvoiceWithExplicitSigningPubkeyBuilder);
+ respond_with_derived_signing_pubkey_methods!(self, InvoiceWithDerivedSigningPubkeyBuilder);
+}
+
+#[cfg(test)]
+impl Refund {
fn as_tlv_stream(&self) -> RefundTlvStreamRef {
self.contents.as_tlv_stream()
}
}
}
+impl PartialEq for Refund {
+ fn eq(&self, other: &Self) -> bool {
+ self.bytes.eq(&other.bytes)
+ }
+}
+
+impl Eq for Refund {}
+
+impl Hash for Refund {
+ fn hash<H: Hasher>(&self, state: &mut H) {
+ self.bytes.hash(state);
+ }
+}
+
impl RefundContents {
pub fn description(&self) -> PrintableString {
PrintableString(&self.description)
#[cfg(test)]
mod tests {
- use super::{Refund, RefundBuilder, RefundTlvStreamRef};
+ use super::{Refund, RefundTlvStreamRef};
+ #[cfg(not(c_bindings))]
+ use {
+ super::RefundBuilder,
+ };
+ #[cfg(c_bindings)]
+ use {
+ super::RefundMaybeWithDerivedMetadataBuilder as RefundBuilder,
+ };
use bitcoin::blockdata::constants::ChainHash;
use bitcoin::network::constants::Network;
use bitcoin::secp256k1::{KeyPair, Secp256k1, SecretKey};
- use core::convert::TryFrom;
+
use core::time::Duration;
- use crate::blinded_path::{BlindedHop, BlindedPath};
+
+ use crate::blinded_path::{BlindedHop, BlindedPath, IntroductionNode};
use crate::sign::KeyMaterial;
use crate::ln::channelmanager::PaymentId;
use crate::ln::features::{InvoiceRequestFeatures, OfferFeatures};
use crate::offers::test_utils::*;
use crate::util::ser::{BigSize, Writeable};
use crate::util::string::PrintableString;
+ use crate::prelude::*;
trait ToBytes {
fn to_bytes(&self) -> Vec<u8>;
let payment_id = PaymentId([1; 32]);
let blinded_path = BlindedPath {
- introduction_node_id: pubkey(40),
+ introduction_node: IntroductionNode::NodeId(pubkey(40)),
blinding_point: pubkey(41),
blinded_hops: vec![
BlindedHop { blinded_node_id: pubkey(43), encrypted_payload: vec![0; 43] },
fn builds_refund_with_paths() {
let paths = vec![
BlindedPath {
- introduction_node_id: pubkey(40),
+ introduction_node: IntroductionNode::NodeId(pubkey(40)),
blinding_point: pubkey(41),
blinded_hops: vec![
BlindedHop { blinded_node_id: pubkey(43), encrypted_payload: vec![0; 43] },
],
},
BlindedPath {
- introduction_node_id: pubkey(40),
+ introduction_node: IntroductionNode::NodeId(pubkey(40)),
blinding_point: pubkey(41),
blinded_hops: vec![
BlindedHop { blinded_node_id: pubkey(45), encrypted_payload: vec![0; 45] },
let past_expiry = Duration::from_secs(0);
let paths = vec![
BlindedPath {
- introduction_node_id: pubkey(40),
+ introduction_node: IntroductionNode::NodeId(pubkey(40)),
blinding_point: pubkey(41),
blinded_hops: vec![
BlindedHop { blinded_node_id: pubkey(43), encrypted_payload: vec![0; 43] },
],
},
BlindedPath {
- introduction_node_id: pubkey(40),
+ introduction_node: IntroductionNode::NodeId(pubkey(40)),
blinding_point: pubkey(41),
blinded_hops: vec![
BlindedHop { blinded_node_id: pubkey(45), encrypted_payload: vec![0; 45] },
use bitcoin::hashes::hmac::{Hmac, HmacEngine};
use bitcoin::hashes::sha256::Hash as Sha256;
use bitcoin::secp256k1::{KeyPair, PublicKey, Secp256k1, SecretKey, self};
-use core::convert::TryFrom;
use core::fmt;
use crate::ln::channelmanager::PaymentId;
use crate::ln::inbound_payment::{ExpandedKey, IV_LEN, Nonce};
use bitcoin::secp256k1::{KeyPair, PublicKey, Secp256k1, SecretKey};
use bitcoin::secp256k1::schnorr::Signature;
-use core::convert::{AsRef, Infallible};
+
use core::time::Duration;
-use crate::blinded_path::{BlindedHop, BlindedPath};
+use crate::blinded_path::{BlindedHop, BlindedPath, IntroductionNode};
use crate::sign::EntropySource;
use crate::ln::PaymentHash;
use crate::ln::features::BlindedHopFeatures;
use crate::offers::invoice::BlindedPayInfo;
use crate::offers::merkle::TaggedHash;
+#[allow(unused_imports)]
+use crate::prelude::*;
+
+pub(crate) fn fail_sign<T: AsRef<TaggedHash>>(_message: &T) -> Result<Signature, ()> {
+ Err(())
+}
+
pub(crate) fn payer_keys() -> KeyPair {
let secp_ctx = Secp256k1::new();
KeyPair::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap())
}
-pub(crate) fn payer_sign<T: AsRef<TaggedHash>>(message: &T) -> Result<Signature, Infallible> {
+pub(crate) fn payer_sign<T: AsRef<TaggedHash>>(message: &T) -> Result<Signature, ()> {
let secp_ctx = Secp256k1::new();
let keys = KeyPair::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
Ok(secp_ctx.sign_schnorr_no_aux_rand(message.as_ref().as_digest(), &keys))
KeyPair::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[43; 32]).unwrap())
}
-pub(crate) fn recipient_sign<T: AsRef<TaggedHash>>(message: &T) -> Result<Signature, Infallible> {
+pub(crate) fn recipient_sign<T: AsRef<TaggedHash>>(message: &T) -> Result<Signature, ()> {
let secp_ctx = Secp256k1::new();
let keys = KeyPair::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[43; 32]).unwrap());
Ok(secp_ctx.sign_schnorr_no_aux_rand(message.as_ref().as_digest(), &keys))
pub(crate) fn payment_paths() -> Vec<(BlindedPayInfo, BlindedPath)> {
let paths = vec![
BlindedPath {
- introduction_node_id: pubkey(40),
+ introduction_node: IntroductionNode::NodeId(pubkey(40)),
blinding_point: pubkey(41),
blinded_hops: vec![
BlindedHop { blinded_node_id: pubkey(43), encrypted_payload: vec![0; 43] },
],
},
BlindedPath {
- introduction_node_id: pubkey(40),
+ introduction_node: IntroductionNode::NodeId(pubkey(40)),
blinding_point: pubkey(41),
blinded_hops: vec![
BlindedHop { blinded_node_id: pubkey(45), encrypted_payload: vec![0; 45] },
//! Onion message testing and test utilities live here.
-use crate::blinded_path::BlindedPath;
+use crate::blinded_path::{BlindedPath, EmptyNodeIdLookUp};
use crate::events::{Event, EventsProvider};
-use crate::ln::features::InitFeatures;
-use crate::ln::msgs::{self, DecodeError, OnionMessageHandler, SocketAddress};
-use crate::sign::{EntropySource, NodeSigner, Recipient};
+use crate::ln::features::{ChannelFeatures, InitFeatures};
+use crate::ln::msgs::{self, DecodeError, OnionMessageHandler};
+use crate::routing::gossip::{NetworkGraph, P2PGossipSync};
+use crate::routing::test_utils::{add_channel, add_or_update_node};
+use crate::sign::{NodeSigner, Recipient};
use crate::util::ser::{FixedLengthReader, LengthReadable, Writeable, Writer};
use crate::util::test_utils;
-use super::messenger::{CustomOnionMessageHandler, Destination, MessageRouter, OnionMessagePath, OnionMessenger, PendingOnionMessage, SendError};
+use super::messenger::{CustomOnionMessageHandler, DefaultMessageRouter, Destination, OnionMessagePath, OnionMessenger, PendingOnionMessage, SendError};
use super::offers::{OffersMessage, OffersMessageHandler};
use super::packet::{OnionMessageContents, Packet};
use bitcoin::network::constants::Network;
use bitcoin::hashes::hex::FromHex;
-use bitcoin::secp256k1::{PublicKey, Secp256k1, SecretKey, self};
+use bitcoin::secp256k1::{All, PublicKey, Secp256k1, SecretKey};
use crate::io;
use crate::io_extras::read_to_end;
use crate::sync::{Arc, Mutex};
+use core::ops::Deref;
+
use crate::prelude::*;
struct MessengerNode {
node_id: PublicKey,
+ privkey: SecretKey,
entropy_source: Arc<test_utils::TestKeysInterface>,
messenger: OnionMessenger<
Arc<test_utils::TestKeysInterface>,
Arc<test_utils::TestNodeSigner>,
Arc<test_utils::TestLogger>,
- Arc<TestMessageRouter>,
+ Arc<EmptyNodeIdLookUp>,
+ Arc<DefaultMessageRouter<
+ Arc<NetworkGraph<Arc<test_utils::TestLogger>>>,
+ Arc<test_utils::TestLogger>,
+ Arc<test_utils::TestKeysInterface>
+ >>,
Arc<TestOffersMessageHandler>,
Arc<TestCustomMessageHandler>
>,
custom_message_handler: Arc<TestCustomMessageHandler>,
-}
-
-struct TestMessageRouter {}
-
-impl MessageRouter for TestMessageRouter {
- fn find_path(
- &self, _sender: PublicKey, _peers: Vec<PublicKey>, destination: Destination
- ) -> Result<OnionMessagePath, ()> {
- Ok(OnionMessagePath {
- intermediate_nodes: vec![],
- destination,
- first_node_addresses:
- Some(vec![SocketAddress::TcpIpV4 { addr: [127, 0, 0, 1], port: 1000 }]),
- })
- }
-
- fn create_blinded_paths<
- ES: EntropySource + ?Sized, T: secp256k1::Signing + secp256k1::Verification
- >(
- &self, _recipient: PublicKey, _peers: Vec<PublicKey>, _entropy_source: &ES,
- _secp_ctx: &Secp256k1<T>
- ) -> Result<Vec<BlindedPath>, ()> {
- unreachable!()
- }
+ gossip_sync: Arc<P2PGossipSync<
+ Arc<NetworkGraph<Arc<test_utils::TestLogger>>>,
+ Arc<test_utils::TestChainSource>,
+ Arc<test_utils::TestLogger>
+ >>
}
struct TestOffersMessageHandler {}
}
fn create_nodes_using_secrets(secrets: Vec<SecretKey>) -> Vec<MessengerNode> {
+ let gossip_logger = Arc::new(test_utils::TestLogger::with_id("gossip".to_string()));
+ let network_graph = Arc::new(NetworkGraph::new(Network::Testnet, gossip_logger.clone()));
+ let gossip_sync = Arc::new(
+ P2PGossipSync::new(network_graph.clone(), None, gossip_logger)
+ );
+
let mut nodes = Vec::new();
for (i, secret_key) in secrets.into_iter().enumerate() {
let logger = Arc::new(test_utils::TestLogger::with_id(format!("node {}", i)));
let entropy_source = Arc::new(test_utils::TestKeysInterface::new(&seed, Network::Testnet));
let node_signer = Arc::new(test_utils::TestNodeSigner::new(secret_key));
- let message_router = Arc::new(TestMessageRouter {});
+ let node_id_lookup = Arc::new(EmptyNodeIdLookUp {});
+ let message_router = Arc::new(
+ DefaultMessageRouter::new(network_graph.clone(), entropy_source.clone())
+ );
let offers_message_handler = Arc::new(TestOffersMessageHandler {});
let custom_message_handler = Arc::new(TestCustomMessageHandler::new());
nodes.push(MessengerNode {
+ privkey: secret_key,
node_id: node_signer.get_node_id(Recipient::Node).unwrap(),
entropy_source: entropy_source.clone(),
messenger: OnionMessenger::new(
- entropy_source, node_signer, logger.clone(), message_router,
+ entropy_source, node_signer, logger.clone(), node_id_lookup, message_router,
offers_message_handler, custom_message_handler.clone()
),
custom_message_handler,
+ gossip_sync: gossip_sync.clone(),
});
}
for i in 0..nodes.len() - 1 {
events.into_inner()
}
+fn add_channel_to_graph(
+ node_a: &MessengerNode, node_b: &MessengerNode, secp_ctx: &Secp256k1<All>, short_channel_id: u64
+) {
+ let gossip_sync = node_a.gossip_sync.deref();
+ let privkey_a = &node_a.privkey;
+ let privkey_b = &node_b.privkey;
+ let channel_features = ChannelFeatures::empty();
+ let node_features_a = node_a.messenger.provided_node_features();
+ let node_features_b = node_b.messenger.provided_node_features();
+ add_channel(gossip_sync, secp_ctx, privkey_a, privkey_b, channel_features, short_channel_id);
+ add_or_update_node(gossip_sync, secp_ctx, privkey_a, node_features_a, 1);
+ add_or_update_node(gossip_sync, secp_ctx, privkey_b, node_features_b, 1);
+}
+
fn pass_along_path(path: &Vec<MessengerNode>) {
let mut prev_node = &path[0];
for node in path.into_iter().skip(1) {
let nodes = create_nodes(2);
let test_msg = TestCustomMessage::Response;
- let path = OnionMessagePath {
- intermediate_nodes: vec![],
- destination: Destination::Node(nodes[1].node_id),
- first_node_addresses: None,
- };
- nodes[0].messenger.send_onion_message_using_path(path, test_msg, None).unwrap();
+ let destination = Destination::Node(nodes[1].node_id);
+ nodes[0].messenger.send_onion_message(test_msg, destination, None).unwrap();
nodes[1].custom_message_handler.expect_message(TestCustomMessage::Response);
pass_along_path(&nodes);
}
destination: Destination::Node(nodes[2].node_id),
first_node_addresses: None,
};
+
nodes[0].messenger.send_onion_message_using_path(path, test_msg, None).unwrap();
nodes[2].custom_message_handler.expect_message(TestCustomMessage::Response);
pass_along_path(&nodes);
let secp_ctx = Secp256k1::new();
let blinded_path = BlindedPath::new_for_message(&[nodes[1].node_id], &*nodes[1].entropy_source, &secp_ctx).unwrap();
- let path = OnionMessagePath {
- intermediate_nodes: vec![],
- destination: Destination::BlindedPath(blinded_path),
- first_node_addresses: None,
- };
- nodes[0].messenger.send_onion_message_using_path(path, test_msg, None).unwrap();
+ let destination = Destination::BlindedPath(blinded_path);
+ nodes[0].messenger.send_onion_message(test_msg, destination, None).unwrap();
nodes[1].custom_message_handler.expect_message(TestCustomMessage::Response);
pass_along_path(&nodes);
}
let secp_ctx = Secp256k1::new();
let blinded_path = BlindedPath::new_for_message(&[nodes[1].node_id, nodes[2].node_id, nodes[3].node_id], &*nodes[3].entropy_source, &secp_ctx).unwrap();
- let path = OnionMessagePath {
- intermediate_nodes: vec![],
- destination: Destination::BlindedPath(blinded_path),
- first_node_addresses: None,
- };
+ let destination = Destination::BlindedPath(blinded_path);
- nodes[0].messenger.send_onion_message_using_path(path, test_msg, None).unwrap();
+ nodes[0].messenger.send_onion_message(test_msg, destination, None).unwrap();
nodes[3].custom_message_handler.expect_message(TestCustomMessage::Response);
pass_along_path(&nodes);
}
let secp_ctx = Secp256k1::new();
let blinded_path = BlindedPath::new_for_message(&[nodes[0].node_id, nodes[1].node_id, nodes[2].node_id], &*nodes[2].entropy_source, &secp_ctx).unwrap();
- let path = OnionMessagePath {
- intermediate_nodes: vec![],
- destination: Destination::BlindedPath(blinded_path),
- first_node_addresses: None,
- };
+ let destination = Destination::BlindedPath(blinded_path);
- nodes[0].messenger.send_onion_message_using_path(path, test_msg.clone(), None).unwrap();
+ nodes[0].messenger.send_onion_message(test_msg.clone(), destination, None).unwrap();
nodes[2].custom_message_handler.expect_message(TestCustomMessage::Response);
pass_along_path(&nodes);
// Try with a two-hop blinded path where we are the introduction node.
let blinded_path = BlindedPath::new_for_message(&[nodes[0].node_id, nodes[1].node_id], &*nodes[1].entropy_source, &secp_ctx).unwrap();
- let path = OnionMessagePath {
- intermediate_nodes: vec![],
- destination: Destination::BlindedPath(blinded_path),
- first_node_addresses: None,
- };
- nodes[0].messenger.send_onion_message_using_path(path, test_msg, None).unwrap();
+ let destination = Destination::BlindedPath(blinded_path);
+ nodes[0].messenger.send_onion_message(test_msg, destination, None).unwrap();
nodes[1].custom_message_handler.expect_message(TestCustomMessage::Response);
nodes.remove(2);
pass_along_path(&nodes);
let secp_ctx = Secp256k1::new();
let mut blinded_path = BlindedPath::new_for_message(&[nodes[1].node_id, nodes[2].node_id], &*nodes[2].entropy_source, &secp_ctx).unwrap();
blinded_path.blinded_hops.clear();
- let path = OnionMessagePath {
- intermediate_nodes: vec![],
- destination: Destination::BlindedPath(blinded_path),
- first_node_addresses: None,
- };
- let err = nodes[0].messenger.send_onion_message_using_path(path, test_msg.clone(), None).unwrap_err();
+ let destination = Destination::BlindedPath(blinded_path);
+ let err = nodes[0].messenger.send_onion_message(test_msg, destination, None).unwrap_err();
assert_eq!(err, SendError::TooFewBlindedHops);
}
// Destination::BlindedPath
let blinded_path = BlindedPath::new_for_message(&[nodes[1].node_id, nodes[2].node_id, nodes[3].node_id], &*nodes[3].entropy_source, &secp_ctx).unwrap();
- let path = OnionMessagePath {
- intermediate_nodes: vec![],
- destination: Destination::BlindedPath(blinded_path),
- first_node_addresses: None,
- };
+ let destination = Destination::BlindedPath(blinded_path);
let reply_path = BlindedPath::new_for_message(&[nodes[2].node_id, nodes[1].node_id, nodes[0].node_id], &*nodes[0].entropy_source, &secp_ctx).unwrap();
- nodes[0].messenger.send_onion_message_using_path(path, test_msg, Some(reply_path)).unwrap();
+ nodes[0].messenger.send_onion_message(test_msg, destination, Some(reply_path)).unwrap();
nodes[3].custom_message_handler.expect_message(TestCustomMessage::Request);
pass_along_path(&nodes);
}
let test_msg = InvalidCustomMessage {};
- let path = OnionMessagePath {
- intermediate_nodes: vec![],
- destination: Destination::Node(nodes[1].node_id),
- first_node_addresses: None,
- };
- let err = nodes[0].messenger.send_onion_message_using_path(path, test_msg, None).unwrap_err();
+ let destination = Destination::Node(nodes[1].node_id);
+ let err = nodes[0].messenger.send_onion_message(test_msg, destination, None).unwrap_err();
assert_eq!(err, SendError::InvalidMessage);
}
fn peer_buffer_full() {
let nodes = create_nodes(2);
let test_msg = TestCustomMessage::Request;
- let path = OnionMessagePath {
- intermediate_nodes: vec![],
- destination: Destination::Node(nodes[1].node_id),
- first_node_addresses: None,
- };
+ let destination = Destination::Node(nodes[1].node_id);
for _ in 0..188 { // Based on MAX_PER_PEER_BUFFER_SIZE in OnionMessenger
- nodes[0].messenger.send_onion_message_using_path(path.clone(), test_msg.clone(), None).unwrap();
+ nodes[0].messenger.send_onion_message(test_msg.clone(), destination.clone(), None).unwrap();
}
- let err = nodes[0].messenger.send_onion_message_using_path(path, test_msg, None).unwrap_err();
+ let err = nodes[0].messenger.send_onion_message(test_msg, destination, None).unwrap_err();
assert_eq!(err, SendError::BufferFull);
}
let nodes = create_nodes(3);
let message = TestCustomMessage::Request;
let secp_ctx = Secp256k1::new();
+ add_channel_to_graph(&nodes[0], &nodes[1], &secp_ctx, 42);
+
let blinded_path = BlindedPath::new_for_message(
&[nodes[1].node_id, nodes[2].node_id], &*nodes[0].entropy_source, &secp_ctx
).unwrap();
let nodes = create_nodes(3);
let message = TestCustomMessage::Request;
let secp_ctx = Secp256k1::new();
+ add_channel_to_graph(&nodes[0], &nodes[1], &secp_ctx, 42);
+
let blinded_path = BlindedPath::new_for_message(
&[nodes[1].node_id, nodes[2].node_id], &*nodes[0].entropy_source, &secp_ctx
).unwrap();
use bitcoin::hashes::sha256::Hash as Sha256;
use bitcoin::secp256k1::{self, PublicKey, Scalar, Secp256k1, SecretKey};
-use crate::blinded_path::BlindedPath;
-use crate::blinded_path::message::{advance_path_by_one, ForwardTlvs, ReceiveTlvs};
+use crate::blinded_path::{BlindedPath, IntroductionNode, NodeIdLookUp};
+use crate::blinded_path::message::{advance_path_by_one, ForwardTlvs, NextHop, ReceiveTlvs};
use crate::blinded_path::utils;
use crate::events::{Event, EventHandler, EventsProvider};
use crate::sign::{EntropySource, NodeSigner, Recipient};
-#[cfg(not(c_bindings))]
-use crate::ln::channelmanager::{SimpleArcChannelManager, SimpleRefChannelManager};
use crate::ln::features::{InitFeatures, NodeFeatures};
use crate::ln::msgs::{self, OnionMessage, OnionMessageHandler, SocketAddress};
use crate::ln::onion_utils;
-use crate::routing::gossip::{NetworkGraph, NodeId};
+use crate::routing::gossip::{NetworkGraph, NodeId, ReadOnlyNetworkGraph};
use super::packet::OnionMessageContents;
use super::packet::ParsedOnionMessageContents;
use super::offers::OffersMessageHandler;
use super::packet::{BIG_PACKET_HOP_DATA_LEN, ForwardControlTlvs, Packet, Payload, ReceiveControlTlvs, SMALL_PACKET_HOP_DATA_LEN};
-use crate::util::logger::Logger;
+use crate::util::logger::{Logger, WithContext};
use crate::util::ser::Writeable;
use core::fmt;
#[cfg(not(c_bindings))]
use {
crate::sign::KeysManager,
+ crate::ln::channelmanager::{SimpleArcChannelManager, SimpleRefChannelManager},
crate::ln::peer_handler::IgnoringMessageHandler,
crate::sync::Arc,
};
/// # use bitcoin::hashes::_export::_core::time::Duration;
/// # use bitcoin::hashes::hex::FromHex;
/// # use bitcoin::secp256k1::{PublicKey, Secp256k1, SecretKey, self};
-/// # use lightning::blinded_path::BlindedPath;
+/// # use lightning::blinded_path::{BlindedPath, EmptyNodeIdLookUp};
/// # use lightning::sign::{EntropySource, KeysManager};
/// # use lightning::ln::peer_handler::IgnoringMessageHandler;
/// # use lightning::onion_message::messenger::{Destination, MessageRouter, OnionMessagePath, OnionMessenger};
/// # first_node_addresses: None,
/// # })
/// # }
-/// # fn create_blinded_paths<ES: EntropySource + ?Sized, T: secp256k1::Signing + secp256k1::Verification>(
-/// # &self, _recipient: PublicKey, _peers: Vec<PublicKey>, _entropy_source: &ES, _secp_ctx: &Secp256k1<T>
+/// # fn create_blinded_paths<T: secp256k1::Signing + secp256k1::Verification>(
+/// # &self, _recipient: PublicKey, _peers: Vec<PublicKey>, _secp_ctx: &Secp256k1<T>
/// # ) -> Result<Vec<BlindedPath>, ()> {
/// # unreachable!()
/// # }
/// # let hop_node_id1 = PublicKey::from_secret_key(&secp_ctx, &node_secret);
/// # let (hop_node_id3, hop_node_id4) = (hop_node_id1, hop_node_id1);
/// # let destination_node_id = hop_node_id1;
+/// # let node_id_lookup = EmptyNodeIdLookUp {};
/// # let message_router = Arc::new(FakeMessageRouter {});
/// # let custom_message_handler = IgnoringMessageHandler {};
/// # let offers_message_handler = IgnoringMessageHandler {};
/// // Create the onion messenger. This must use the same `keys_manager` as is passed to your
/// // ChannelManager.
/// let onion_messenger = OnionMessenger::new(
-/// &keys_manager, &keys_manager, logger, message_router, &offers_message_handler,
-/// &custom_message_handler
+/// &keys_manager, &keys_manager, logger, &node_id_lookup, message_router,
+/// &offers_message_handler, &custom_message_handler
/// );
/// # #[derive(Debug)]
///
/// [`InvoiceRequest`]: crate::offers::invoice_request::InvoiceRequest
/// [`Bolt12Invoice`]: crate::offers::invoice::Bolt12Invoice
-pub struct OnionMessenger<ES: Deref, NS: Deref, L: Deref, MR: Deref, OMH: Deref, CMH: Deref>
+pub struct OnionMessenger<ES: Deref, NS: Deref, L: Deref, NL: Deref, MR: Deref, OMH: Deref, CMH: Deref>
where
ES::Target: EntropySource,
NS::Target: NodeSigner,
L::Target: Logger,
+ NL::Target: NodeIdLookUp,
MR::Target: MessageRouter,
OMH::Target: OffersMessageHandler,
CMH::Target: CustomOnionMessageHandler,
logger: L,
message_recipients: Mutex<HashMap<PublicKey, OnionMessageRecipient>>,
secp_ctx: Secp256k1<secp256k1::All>,
+ node_id_lookup: NL,
message_router: MR,
offers_handler: OMH,
custom_handler: CMH,
/// Creates [`BlindedPath`]s to the `recipient` node. The nodes in `peers` are assumed to be
/// direct peers with the `recipient`.
fn create_blinded_paths<
- ES: EntropySource + ?Sized, T: secp256k1::Signing + secp256k1::Verification
+ T: secp256k1::Signing + secp256k1::Verification
>(
- &self, recipient: PublicKey, peers: Vec<PublicKey>, entropy_source: &ES,
- secp_ctx: &Secp256k1<T>
+ &self, recipient: PublicKey, peers: Vec<PublicKey>, secp_ctx: &Secp256k1<T>,
) -> Result<Vec<BlindedPath>, ()>;
}
/// A [`MessageRouter`] that can only route to a directly connected [`Destination`].
-pub struct DefaultMessageRouter<G: Deref<Target=NetworkGraph<L>>, L: Deref>
+pub struct DefaultMessageRouter<G: Deref<Target=NetworkGraph<L>>, L: Deref, ES: Deref>
where
L::Target: Logger,
+ ES::Target: EntropySource,
{
network_graph: G,
+ entropy_source: ES,
}
-impl<G: Deref<Target=NetworkGraph<L>>, L: Deref> DefaultMessageRouter<G, L>
+impl<G: Deref<Target=NetworkGraph<L>>, L: Deref, ES: Deref> DefaultMessageRouter<G, L, ES>
where
L::Target: Logger,
+ ES::Target: EntropySource,
{
/// Creates a [`DefaultMessageRouter`] using the given [`NetworkGraph`].
- pub fn new(network_graph: G) -> Self {
- Self { network_graph }
+ pub fn new(network_graph: G, entropy_source: ES) -> Self {
+ Self { network_graph, entropy_source }
}
}
-impl<G: Deref<Target=NetworkGraph<L>>, L: Deref> MessageRouter for DefaultMessageRouter<G, L>
+impl<G: Deref<Target=NetworkGraph<L>>, L: Deref, ES: Deref> MessageRouter for DefaultMessageRouter<G, L, ES>
where
L::Target: Logger,
+ ES::Target: EntropySource,
{
fn find_path(
- &self, _sender: PublicKey, peers: Vec<PublicKey>, destination: Destination
+ &self, sender: PublicKey, peers: Vec<PublicKey>, mut destination: Destination
) -> Result<OnionMessagePath, ()> {
- let first_node = destination.first_node();
- if peers.contains(&first_node) {
+ let network_graph = self.network_graph.deref().read_only();
+ destination.resolve(&network_graph);
+
+ let first_node = match destination.first_node() {
+ Some(first_node) => first_node,
+ None => return Err(()),
+ };
+
+ if peers.contains(&first_node) || sender == first_node {
Ok(OnionMessagePath {
intermediate_nodes: vec![], destination, first_node_addresses: None
})
} else {
- let network_graph = self.network_graph.deref().read_only();
let node_announcement = network_graph
.node(&NodeId::from_pubkey(&first_node))
.and_then(|node_info| node_info.announcement_info.as_ref())
}
fn create_blinded_paths<
- ES: EntropySource + ?Sized, T: secp256k1::Signing + secp256k1::Verification
+ T: secp256k1::Signing + secp256k1::Verification
>(
- &self, recipient: PublicKey, peers: Vec<PublicKey>, entropy_source: &ES,
- secp_ctx: &Secp256k1<T>
+ &self, recipient: PublicKey, peers: Vec<PublicKey>, secp_ctx: &Secp256k1<T>,
) -> Result<Vec<BlindedPath>, ()> {
// Limit the number of blinded paths that are computed.
const MAX_PATHS: usize = 3;
const MIN_PEER_CHANNELS: usize = 3;
let network_graph = self.network_graph.deref().read_only();
- let paths = peers.iter()
+ let is_recipient_announced =
+ network_graph.nodes().contains_key(&NodeId::from_pubkey(&recipient));
+
+ let mut peer_info = peers.iter()
// Limit to peers with announced channels
- .filter(|pubkey|
+ .filter_map(|pubkey|
network_graph
.node(&NodeId::from_pubkey(pubkey))
- .map(|info| &info.channels[..])
- .map(|channels| channels.len() >= MIN_PEER_CHANNELS)
- .unwrap_or(false)
+ .filter(|info| info.channels.len() >= MIN_PEER_CHANNELS)
+ .map(|info| (*pubkey, info.is_tor_only(), info.channels.len()))
)
- .map(|pubkey| vec![*pubkey, recipient])
- .map(|node_pks| BlindedPath::new_for_message(&node_pks, entropy_source, secp_ctx))
+ // Exclude Tor-only nodes when the recipient is announced.
+ .filter(|(_, is_tor_only, _)| !(*is_tor_only && is_recipient_announced))
+ .collect::<Vec<_>>();
+
+ // Prefer using non-Tor nodes with the most channels as the introduction node.
+ peer_info.sort_unstable_by(|(_, a_tor_only, a_channels), (_, b_tor_only, b_channels)| {
+ a_tor_only.cmp(b_tor_only).then(a_channels.cmp(b_channels).reverse())
+ });
+
+ let paths = peer_info.into_iter()
+ .map(|(pubkey, _, _)| vec![pubkey, recipient])
+ .map(|node_pks| BlindedPath::new_for_message(&node_pks, &*self.entropy_source, secp_ctx))
.take(MAX_PATHS)
.collect::<Result<Vec<_>, _>>();
match paths {
Ok(paths) if !paths.is_empty() => Ok(paths),
_ => {
- if network_graph.nodes().contains_key(&NodeId::from_pubkey(&recipient)) {
- BlindedPath::one_hop_for_message(recipient, entropy_source, secp_ctx)
+ if is_recipient_announced {
+ BlindedPath::one_hop_for_message(recipient, &*self.entropy_source, secp_ctx)
.map(|path| vec![path])
} else {
Err(())
impl OnionMessagePath {
/// Returns the first node in the path.
- pub fn first_node(&self) -> PublicKey {
+ pub fn first_node(&self) -> Option<PublicKey> {
self.intermediate_nodes
.first()
.copied()
- .unwrap_or_else(|| self.destination.first_node())
+ .or_else(|| self.destination.first_node())
}
}
/// The destination of an onion message.
-#[derive(Clone)]
+#[derive(Clone, Hash, Debug, PartialEq, Eq)]
pub enum Destination {
/// We're sending this onion message to a node.
Node(PublicKey),
}
impl Destination {
+ /// Attempts to resolve the [`IntroductionNode::DirectedShortChannelId`] of a
+ /// [`Destination::BlindedPath`] to a [`IntroductionNode::NodeId`], if applicable, using the
+ /// provided [`ReadOnlyNetworkGraph`].
+ pub fn resolve(&mut self, network_graph: &ReadOnlyNetworkGraph) {
+ if let Destination::BlindedPath(path) = self {
+ if let IntroductionNode::DirectedShortChannelId(..) = path.introduction_node {
+ if let Some(pubkey) = path
+ .public_introduction_node_id(network_graph)
+ .and_then(|node_id| node_id.as_pubkey().ok())
+ {
+ path.introduction_node = IntroductionNode::NodeId(pubkey);
+ }
+ }
+ }
+ }
+
pub(super) fn num_hops(&self) -> usize {
match self {
Destination::Node(_) => 1,
}
}
- fn first_node(&self) -> PublicKey {
+ fn first_node(&self) -> Option<PublicKey> {
match self {
- Destination::Node(node_id) => *node_id,
- Destination::BlindedPath(BlindedPath { introduction_node_id: node_id, .. }) => *node_id,
+ Destination::Node(node_id) => Some(*node_id),
+ Destination::BlindedPath(BlindedPath { introduction_node, .. }) => {
+ match introduction_node {
+ IntroductionNode::NodeId(pubkey) => Some(*pubkey),
+ IntroductionNode::DirectedShortChannelId(..) => None,
+ }
+ },
}
}
}
/// Result of successfully [sending an onion message].
///
/// [sending an onion message]: OnionMessenger::send_onion_message
-#[derive(Debug, PartialEq, Eq)]
+#[derive(Clone, Hash, Debug, PartialEq, Eq)]
pub enum SendSuccess {
/// The message was buffered and will be sent once it is processed by
/// [`OnionMessageHandler::next_onion_message_for_peer`].
/// Errors that may occur when [sending an onion message].
///
/// [sending an onion message]: OnionMessenger::send_onion_message
-#[derive(Debug, PartialEq, Eq)]
+#[derive(Clone, Hash, Debug, PartialEq, Eq)]
pub enum SendError {
/// Errored computing onion message packet keys.
Secp256k1(secp256k1::Error),
///
/// [`NodeSigner`]: crate::sign::NodeSigner
GetNodeIdFailed,
+ /// The provided [`Destination`] has a blinded path with an unresolved introduction node. An
+ /// attempt to resolve it in the [`MessageRouter`] when finding an [`OnionMessagePath`] likely
+ /// failed.
+ UnresolvedIntroductionNode,
/// We attempted to send to a blinded path where we are the introduction node, and failed to
/// advance the blinded path to make the second hop the new introduction node. Either
/// [`NodeSigner::ecdh`] failed, we failed to tweak the current blinding point to get the
/// A processed incoming onion message, containing either a Forward (another onion message)
/// or a Receive payload with decrypted contents.
+#[derive(Debug)]
pub enum PeeledOnion<T: OnionMessageContents> {
-/// Forwarded onion, with the next node id and a new onion
+/// Forwarded onion, with the next hop (a node id or short channel id) and a new onion
- Forward(PublicKey, OnionMessage),
+ Forward(NextHop, OnionMessage),
/// Received onion message, with decrypted contents, path_id, and reply path
Receive(ParsedOnionMessageContents<T>, Option<[u8; 32]>, Option<BlindedPath>)
}
+
+/// Creates an [`OnionMessage`] with the given `contents` for sending to the destination of
+/// `path`, first calling [`Destination::resolve`] on `path.destination` with the given
+/// [`ReadOnlyNetworkGraph`].
+///
+/// Returns the node id of the peer to send the message to, the message itself, and any addresses
+/// needed to connect to the first node.
+///
+/// Errors with [`SendError::UnresolvedIntroductionNode`] if a blinded path's introduction node
+/// could not be resolved to a node id by either the network graph or the [`NodeIdLookUp`].
+pub fn create_onion_message_resolving_destination<
+ ES: Deref, NS: Deref, NL: Deref, T: OnionMessageContents
+>(
+ entropy_source: &ES, node_signer: &NS, node_id_lookup: &NL,
+ network_graph: &ReadOnlyNetworkGraph, secp_ctx: &Secp256k1<secp256k1::All>,
+ mut path: OnionMessagePath, contents: T, reply_path: Option<BlindedPath>,
+) -> Result<(PublicKey, OnionMessage, Option<Vec<SocketAddress>>), SendError>
+where
+ ES::Target: EntropySource,
+ NS::Target: NodeSigner,
+ NL::Target: NodeIdLookUp,
+{
+ // Best-effort resolution of a blinded path's introduction node via the network graph; any
+ // introduction still unresolved after this is handled (or rejected) by `create_onion_message`.
+ path.destination.resolve(network_graph);
+ create_onion_message(
+ entropy_source, node_signer, node_id_lookup, secp_ctx, path, contents, reply_path,
+ )
+}
+
/// Creates an [`OnionMessage`] with the given `contents` for sending to the destination of
/// `path`.
///
/// Returns the node id of the peer to send the message to, the message itself, and any addresses
-/// need to connect to the first node.
-pub fn create_onion_message<ES: Deref, NS: Deref, T: OnionMessageContents>(
- entropy_source: &ES, node_signer: &NS, secp_ctx: &Secp256k1<secp256k1::All>,
- path: OnionMessagePath, contents: T, reply_path: Option<BlindedPath>,
+/// needed to connect to the first node.
+///
+/// Returns [`SendError::UnresolvedIntroductionNode`] if:
+/// - `destination` contains a blinded path with an [`IntroductionNode::DirectedShortChannelId`],
+/// - unless it can be resolved by [`NodeIdLookUp::next_node_id`].
+/// Use [`create_onion_message_resolving_destination`] instead to resolve the introduction node
+/// first with a [`ReadOnlyNetworkGraph`].
+pub fn create_onion_message<ES: Deref, NS: Deref, NL: Deref, T: OnionMessageContents>(
+ entropy_source: &ES, node_signer: &NS, node_id_lookup: &NL,
+ secp_ctx: &Secp256k1<secp256k1::All>, path: OnionMessagePath, contents: T,
+ reply_path: Option<BlindedPath>,
) -> Result<(PublicKey, OnionMessage, Option<Vec<SocketAddress>>), SendError>
where
ES::Target: EntropySource,
NS::Target: NodeSigner,
+ NL::Target: NodeIdLookUp,
{
let OnionMessagePath { intermediate_nodes, mut destination, first_node_addresses } = path;
if let Destination::BlindedPath(BlindedPath { ref blinded_hops, .. }) = destination {
if let Destination::BlindedPath(ref mut blinded_path) = destination {
let our_node_id = node_signer.get_node_id(Recipient::Node)
.map_err(|()| SendError::GetNodeIdFailed)?;
- if blinded_path.introduction_node_id == our_node_id {
- advance_path_by_one(blinded_path, node_signer, &secp_ctx)
+ let introduction_node_id = match blinded_path.introduction_node {
+ IntroductionNode::NodeId(pubkey) => pubkey,
+ IntroductionNode::DirectedShortChannelId(direction, scid) => {
+ match node_id_lookup.next_node_id(scid) {
+ Some(next_node_id) => *direction.select_pubkey(&our_node_id, &next_node_id),
+ None => return Err(SendError::UnresolvedIntroductionNode),
+ }
+ },
+ };
+ if introduction_node_id == our_node_id {
+ advance_path_by_one(blinded_path, node_signer, node_id_lookup, &secp_ctx)
.map_err(|()| SendError::BlindedPathAdvanceFailed)?;
}
}
let (first_node_id, blinding_point) = if let Some(first_node_id) = intermediate_nodes.first() {
(*first_node_id, PublicKey::from_secret_key(&secp_ctx, &blinding_secret))
} else {
- match destination {
- Destination::Node(pk) => (pk, PublicKey::from_secret_key(&secp_ctx, &blinding_secret)),
- Destination::BlindedPath(BlindedPath { introduction_node_id, blinding_point, .. }) =>
- (introduction_node_id, blinding_point),
+ match &destination {
+ Destination::Node(pk) => (*pk, PublicKey::from_secret_key(&secp_ctx, &blinding_secret)),
+ Destination::BlindedPath(BlindedPath { introduction_node, blinding_point, .. }) => {
+ match introduction_node {
+ IntroductionNode::NodeId(pubkey) => (*pubkey, *blinding_point),
+ IntroductionNode::DirectedShortChannelId(..) => {
+ return Err(SendError::UnresolvedIntroductionNode);
+ },
+ }
+ }
}
};
let (packet_payloads, packet_keys) = packet_payloads_and_keys(
- &secp_ctx, &intermediate_nodes, destination, contents, reply_path, &blinding_secret)
- .map_err(|e| SendError::Secp256k1(e))?;
+ &secp_ctx, &intermediate_nodes, destination, contents, reply_path, &blinding_secret
+ )?;
let prng_seed = entropy_source.get_secure_random_bytes();
let onion_routing_packet = construct_onion_message_packet(
Ok(PeeledOnion::Receive(message, path_id, reply_path))
},
Ok((Payload::Forward(ForwardControlTlvs::Unblinded(ForwardTlvs {
- next_node_id, next_blinding_override
+ next_hop, next_blinding_override
})), Some((next_hop_hmac, new_packet_bytes)))) => {
- // TODO: we need to check whether `next_node_id` is our node, in which case this is a dummy
+ // TODO: we need to check whether `next_hop` is our node, in which case this is a dummy
// blinded hop and this onion message is destined for us. In this situation, we should keep
// unwrapping the onion layers to get to the final payload. Since we don't have the option
// of creating blinded paths with dummy hops currently, we should be ok to not handle this
onion_routing_packet: outgoing_packet,
};
- Ok(PeeledOnion::Forward(next_node_id, onion_message))
+ Ok(PeeledOnion::Forward(next_hop, onion_message))
},
Err(e) => {
log_trace!(logger, "Errored decoding onion message packet: {:?}", e);
}
}
-impl<ES: Deref, NS: Deref, L: Deref, MR: Deref, OMH: Deref, CMH: Deref>
-OnionMessenger<ES, NS, L, MR, OMH, CMH>
+impl<ES: Deref, NS: Deref, L: Deref, NL: Deref, MR: Deref, OMH: Deref, CMH: Deref>
+OnionMessenger<ES, NS, L, NL, MR, OMH, CMH>
where
ES::Target: EntropySource,
NS::Target: NodeSigner,
L::Target: Logger,
+ NL::Target: NodeIdLookUp,
MR::Target: MessageRouter,
OMH::Target: OffersMessageHandler,
CMH::Target: CustomOnionMessageHandler,
/// Constructs a new `OnionMessenger` to send, forward, and delegate received onion messages to
/// their respective handlers.
pub fn new(
- entropy_source: ES, node_signer: NS, logger: L, message_router: MR, offers_handler: OMH,
- custom_handler: CMH
+ entropy_source: ES, node_signer: NS, logger: L, node_id_lookup: NL, message_router: MR,
+ offers_handler: OMH, custom_handler: CMH
) -> Self {
let mut secp_ctx = Secp256k1::new();
+ // Re-seed the secp context with fresh entropy (per-instance blinding of secp operations).
secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
OnionMessenger {
entropy_source,
node_signer,
- message_recipients: Mutex::new(HashMap::new()),
+ message_recipients: Mutex::new(new_hash_map()),
secp_ctx,
logger,
+ node_id_lookup,
message_router,
offers_handler,
custom_handler,
}
}
+ /// Replaces the [`OffersMessageHandler`]. Available in tests only.
+ #[cfg(test)]
+ pub(crate) fn set_offers_handler(&mut self, offers_handler: OMH) {
+ self.offers_handler = offers_handler;
+ }
+
/// Sends an [`OnionMessage`] with the given `contents` to `destination`.
///
/// See [`OnionMessenger`] for example usage.
&self, contents: T, destination: Destination, reply_path: Option<BlindedPath>,
log_suffix: fmt::Arguments
) -> Result<SendSuccess, SendError> {
+ let mut logger = WithContext::from(&self.logger, None, None);
let result = self.find_path(destination)
- .and_then(|path| self.enqueue_onion_message(path, contents, reply_path, log_suffix));
+ .and_then(|path| {
+ let first_hop = path.intermediate_nodes.get(0).map(|p| *p);
+ logger = WithContext::from(&self.logger, first_hop, None);
+ self.enqueue_onion_message(path, contents, reply_path, log_suffix)
+ });
match result.as_ref() {
Err(SendError::GetNodeIdFailed) => {
- log_warn!(self.logger, "Unable to retrieve node id {}", log_suffix);
+ log_warn!(logger, "Unable to retrieve node id {}", log_suffix);
},
Err(SendError::PathNotFound) => {
- log_trace!(self.logger, "Failed to find path {}", log_suffix);
+ log_trace!(logger, "Failed to find path {}", log_suffix);
},
Err(e) => {
- log_trace!(self.logger, "Failed sending onion message {}: {:?}", log_suffix, e);
+ log_trace!(logger, "Failed sending onion message {}: {:?}", log_suffix, e);
},
Ok(SendSuccess::Buffered) => {
- log_trace!(self.logger, "Buffered onion message {}", log_suffix);
+ log_trace!(logger, "Buffered onion message {}", log_suffix);
},
Ok(SendSuccess::BufferedAwaitingConnection(node_id)) => {
log_trace!(
- self.logger, "Buffered onion message waiting on peer connection {}: {:?}",
+ logger,
+ "Buffered onion message waiting on peer connection {}: {}",
log_suffix, node_id
);
},
log_trace!(self.logger, "Constructing onion message {}: {:?}", log_suffix, contents);
let (first_node_id, onion_message, addresses) = create_onion_message(
- &self.entropy_source, &self.node_signer, &self.secp_ctx, path, contents, reply_path
+ &self.entropy_source, &self.node_signer, &self.node_id_lookup, &self.secp_ctx, path,
+ contents, reply_path,
)?;
let mut message_recipients = self.message_recipients.lock().unwrap();
}
}
- #[cfg(test)]
- pub(super) fn send_onion_message_using_path<T: OnionMessageContents>(
+ #[cfg(any(test, feature = "_test_utils"))]
+ pub fn send_onion_message_using_path<T: OnionMessageContents>(
&self, path: OnionMessagePath, contents: T, reply_path: Option<BlindedPath>
) -> Result<SendSuccess, SendError> {
self.enqueue_onion_message(path, contents, reply_path, format_args!(""))
}
+ /// Peels one layer off the given [`OnionMessage`] using this messenger's node signer and
+ /// custom message handler, yielding either a [`PeeledOnion::Receive`] with decrypted contents
+ /// or a [`PeeledOnion::Forward`] with the next hop and re-wrapped onion. Thin wrapper around
+ /// the free function [`peel_onion_message`].
+ pub(crate) fn peel_onion_message(
+ &self, msg: &OnionMessage
+ ) -> Result<PeeledOnion<<<CMH>::Target as CustomOnionMessageHandler>::CustomMessage>, ()> {
+ peel_onion_message(
+ msg, &self.secp_ctx, &*self.node_signer, &*self.logger, &*self.custom_handler
+ )
+ }
+
fn handle_onion_message_response<T: OnionMessageContents>(
&self, response: Option<T>, reply_path: Option<BlindedPath>, log_suffix: fmt::Arguments
) {
#[cfg(test)]
pub(super) fn release_pending_msgs(&self) -> HashMap<PublicKey, VecDeque<OnionMessage>> {
let mut message_recipients = self.message_recipients.lock().unwrap();
- let mut msgs = HashMap::new();
+ let mut msgs = new_hash_map();
// We don't want to disconnect the peers by removing them entirely from the original map, so we
// release the pending message buffers individually.
for (node_id, recipient) in &mut *message_recipients {
false
}
-impl<ES: Deref, NS: Deref, L: Deref, MR: Deref, OMH: Deref, CMH: Deref> EventsProvider
-for OnionMessenger<ES, NS, L, MR, OMH, CMH>
+impl<ES: Deref, NS: Deref, L: Deref, NL: Deref, MR: Deref, OMH: Deref, CMH: Deref> EventsProvider
+for OnionMessenger<ES, NS, L, NL, MR, OMH, CMH>
where
ES::Target: EntropySource,
NS::Target: NodeSigner,
L::Target: Logger,
+ NL::Target: NodeIdLookUp,
MR::Target: MessageRouter,
OMH::Target: OffersMessageHandler,
CMH::Target: CustomOnionMessageHandler,
}
}
-impl<ES: Deref, NS: Deref, L: Deref, MR: Deref, OMH: Deref, CMH: Deref> OnionMessageHandler
-for OnionMessenger<ES, NS, L, MR, OMH, CMH>
+impl<ES: Deref, NS: Deref, L: Deref, NL: Deref, MR: Deref, OMH: Deref, CMH: Deref> OnionMessageHandler
+for OnionMessenger<ES, NS, L, NL, MR, OMH, CMH>
where
ES::Target: EntropySource,
NS::Target: NodeSigner,
L::Target: Logger,
+ NL::Target: NodeIdLookUp,
MR::Target: MessageRouter,
OMH::Target: OffersMessageHandler,
CMH::Target: CustomOnionMessageHandler,
{
- fn handle_onion_message(&self, _peer_node_id: &PublicKey, msg: &OnionMessage) {
- match peel_onion_message(
- msg, &self.secp_ctx, &*self.node_signer, &*self.logger, &*self.custom_handler
- ) {
+ fn handle_onion_message(&self, peer_node_id: &PublicKey, msg: &OnionMessage) {
+ let logger = WithContext::from(&self.logger, Some(*peer_node_id), None);
+ match self.peel_onion_message(msg) {
Ok(PeeledOnion::Receive(message, path_id, reply_path)) => {
log_trace!(
- self.logger,
- "Received an onion message with path_id {:02x?} and {} reply_path: {:?}",
+ logger,
+ "Received an onion message with path_id {:02x?} and {} reply_path: {:?}",
path_id, if reply_path.is_some() { "a" } else { "no" }, message);
match message {
},
}
},
- Ok(PeeledOnion::Forward(next_node_id, onion_message)) => {
+ Ok(PeeledOnion::Forward(next_hop, onion_message)) => {
+ let next_node_id = match next_hop {
+ NextHop::NodeId(pubkey) => pubkey,
+ NextHop::ShortChannelId(scid) => match self.node_id_lookup.next_node_id(scid) {
+ Some(pubkey) => pubkey,
+ None => {
+ log_trace!(self.logger, "Dropping forwarded onion messager: unable to resolve next hop using SCID {}", scid);
+ return
+ },
+ },
+ };
+
let mut message_recipients = self.message_recipients.lock().unwrap();
if outbound_buffer_full(&next_node_id, &message_recipients) {
- log_trace!(self.logger, "Dropping forwarded onion message to peer {:?}: outbound buffer full", next_node_id);
+ log_trace!(
+ logger,
+ "Dropping forwarded onion message to peer {}: outbound buffer full",
+ next_node_id);
return
}
e.get(), OnionMessageRecipient::ConnectedPeer(..)
) => {
e.get_mut().enqueue_message(onion_message);
- log_trace!(self.logger, "Forwarding an onion message to peer {}", next_node_id);
+ log_trace!(logger, "Forwarding an onion message to peer {}", next_node_id);
},
_ => {
- log_trace!(self.logger, "Dropping forwarded onion message to disconnected peer {:?}", next_node_id);
+ log_trace!(
+ logger,
+ "Dropping forwarded onion message to disconnected peer {}",
+ next_node_id);
return
},
}
},
Err(e) => {
- log_error!(self.logger, "Failed to process onion message {:?}", e);
+ log_error!(logger, "Failed to process onion message {:?}", e);
}
}
}
Arc<KeysManager>,
Arc<KeysManager>,
Arc<L>,
- Arc<DefaultMessageRouter<Arc<NetworkGraph<Arc<L>>>, Arc<L>>>,
+ Arc<SimpleArcChannelManager<M, T, F, L>>,
+ Arc<DefaultMessageRouter<Arc<NetworkGraph<Arc<L>>>, Arc<L>, Arc<KeysManager>>>,
Arc<SimpleArcChannelManager<M, T, F, L>>,
IgnoringMessageHandler
>;
&'a KeysManager,
&'a KeysManager,
&'b L,
- &'i DefaultMessageRouter<&'g NetworkGraph<&'b L>, &'b L>,
- &'j SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, 'f, 'g, 'h, M, T, F, L>,
+ &'i SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, 'f, 'g, 'h, M, T, F, L>,
+ &'j DefaultMessageRouter<&'g NetworkGraph<&'b L>, &'b L, &'a KeysManager>,
+ &'i SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, 'f, 'g, 'h, M, T, F, L>,
IgnoringMessageHandler
>;
fn packet_payloads_and_keys<T: OnionMessageContents, S: secp256k1::Signing + secp256k1::Verification>(
secp_ctx: &Secp256k1<S>, unblinded_path: &[PublicKey], destination: Destination, message: T,
mut reply_path: Option<BlindedPath>, session_priv: &SecretKey
-) -> Result<(Vec<(Payload<T>, [u8; 32])>, Vec<onion_utils::OnionKeys>), secp256k1::Error> {
+) -> Result<(Vec<(Payload<T>, [u8; 32])>, Vec<onion_utils::OnionKeys>), SendError> {
let num_hops = unblinded_path.len() + destination.num_hops();
let mut payloads = Vec::with_capacity(num_hops);
let mut onion_packet_keys = Vec::with_capacity(num_hops);
- let (mut intro_node_id_blinding_pt, num_blinded_hops) = if let Destination::BlindedPath(BlindedPath {
- introduction_node_id, blinding_point, blinded_hops }) = &destination {
- (Some((*introduction_node_id, *blinding_point)), blinded_hops.len()) } else { (None, 0) };
+ let (mut intro_node_id_blinding_pt, num_blinded_hops) = match &destination {
+ Destination::Node(_) => (None, 0),
+ Destination::BlindedPath(BlindedPath { introduction_node, blinding_point, blinded_hops }) => {
+ let introduction_node_id = match introduction_node {
+ IntroductionNode::NodeId(pubkey) => pubkey,
+ IntroductionNode::DirectedShortChannelId(..) => {
+ return Err(SendError::UnresolvedIntroductionNode);
+ },
+ };
+ (Some((*introduction_node_id, *blinding_point)), blinded_hops.len())
+ },
+ };
let num_unblinded_hops = num_hops - num_blinded_hops;
let mut unblinded_path_idx = 0;
if let Some(ss) = prev_control_tlvs_ss.take() {
payloads.push((Payload::Forward(ForwardControlTlvs::Unblinded(
ForwardTlvs {
- next_node_id: unblinded_pk_opt.unwrap(),
+ next_hop: NextHop::NodeId(unblinded_pk_opt.unwrap()),
next_blinding_override: None,
}
)), ss));
} else if let Some((intro_node_id, blinding_pt)) = intro_node_id_blinding_pt.take() {
if let Some(control_tlvs_ss) = prev_control_tlvs_ss.take() {
payloads.push((Payload::Forward(ForwardControlTlvs::Unblinded(ForwardTlvs {
- next_node_id: intro_node_id,
+ next_hop: NextHop::NodeId(intro_node_id),
next_blinding_override: Some(blinding_pt),
})), control_tlvs_ss));
}
mu,
});
}
- )?;
+ ).map_err(|e| SendError::Secp256k1(e))?;
if let Some(control_tlvs) = final_control_tlvs {
payloads.push((Payload::Receive {
//! Message handling for BOLT 12 Offers.
-use core::convert::TryFrom;
use core::fmt;
use crate::io::{self, Read};
use crate::ln::msgs::DecodeError;
use bitcoin::secp256k1::ecdh::SharedSecret;
use crate::blinded_path::BlindedPath;
-use crate::blinded_path::message::{ForwardTlvs, ReceiveTlvs};
+use crate::blinded_path::message::{ForwardTlvs, NextHop, ReceiveTlvs};
use crate::blinded_path::utils::Padding;
use crate::ln::msgs::DecodeError;
use crate::ln::onion_utils;
fn read<R: Read>(r: &mut R) -> Result<Self, DecodeError> {
_init_and_read_tlv_stream!(r, {
(1, _padding, option),
- (2, _short_channel_id, option),
+ (2, short_channel_id, option),
(4, next_node_id, option),
(6, path_id, option),
(8, next_blinding_override, option),
});
let _padding: Option<Padding> = _padding;
- let _short_channel_id: Option<u64> = _short_channel_id;
- let valid_fwd_fmt = next_node_id.is_some() && path_id.is_none();
- let valid_recv_fmt = next_node_id.is_none() && next_blinding_override.is_none();
+ let next_hop = match (short_channel_id, next_node_id) {
+ (Some(_), Some(_)) => return Err(DecodeError::InvalidValue),
+ (Some(scid), None) => Some(NextHop::ShortChannelId(scid)),
+ (None, Some(pubkey)) => Some(NextHop::NodeId(pubkey)),
+ (None, None) => None,
+ };
+
+ let valid_fwd_fmt = next_hop.is_some() && path_id.is_none();
+ let valid_recv_fmt = next_hop.is_none() && next_blinding_override.is_none();
let payload_fmt = if valid_fwd_fmt {
ControlTlvs::Forward(ForwardTlvs {
- next_node_id: next_node_id.unwrap(),
+ next_hop: next_hop.unwrap(),
next_blinding_override,
})
} else if valid_recv_fmt {
use crate::io_extras::{copy, sink};
use crate::prelude::*;
use core::{cmp, fmt};
-use core::convert::TryFrom;
use crate::sync::{RwLock, RwLockReadGuard, LockTestExt};
#[cfg(feature = "std")]
use core::sync::atomic::{AtomicUsize, Ordering};
NodeId(pubkey.serialize())
}
+ /// Create a new NodeId from a slice of bytes
+ ///
+ /// Errors with [`DecodeError::InvalidValue`] unless the slice is exactly `PUBLIC_KEY_SIZE`
+ /// (33) bytes long. The bytes are copied as-is; they are not checked to be a valid public key.
+ pub fn from_slice(bytes: &[u8]) -> Result<Self, DecodeError> {
+ if bytes.len() != PUBLIC_KEY_SIZE {
+ return Err(DecodeError::InvalidValue);
+ }
+ let mut data = [0; PUBLIC_KEY_SIZE];
+ data.copy_from_slice(bytes);
+ Ok(NodeId(data))
+ }
+
/// Get the public key slice from this NodeId
pub fn as_slice(&self) -> &[u8] {
&self.0
// Prior replies should use the number of blocks that fit into the reply. Overflow
// safe since first_blocknum is always <= last SCID's block.
else {
- (false, block_from_scid(batch.last().unwrap()) - first_blocknum)
+ (false, block_from_scid(*batch.last().unwrap()) - first_blocknum)
};
prev_batch_endblock = first_blocknum + number_of_blocks;
///
/// Refers to the `node_id` forwarding the payment to the next hop.
#[inline]
- pub(super) fn source(&self) -> &'a NodeId { if self.from_node_one { &self.channel.node_one } else { &self.channel.node_two } }
+ pub fn source(&self) -> &'a NodeId { if self.from_node_one { &self.channel.node_one } else { &self.channel.node_two } }
/// Returns the `node_id` of the target hop.
///
/// Refers to the `node_id` receiving the payment from the previous hop.
#[inline]
- pub(super) fn target(&self) -> &'a NodeId { if self.from_node_one { &self.channel.node_two } else { &self.channel.node_one } }
+ pub fn target(&self) -> &'a NodeId { if self.from_node_one { &self.channel.node_two } else { &self.channel.node_one } }
}
impl<'a> fmt::Debug for DirectedChannelInfo<'a> {
pub announcement_info: Option<NodeAnnouncementInfo>
}
+impl NodeInfo {
+ /// Returns whether the node has only announced Tor addresses.
+ ///
+ /// Returns `false` if no node announcement has been seen, or if the stored announcement
+ /// contains no addresses at all.
+ pub fn is_tor_only(&self) -> bool {
+ self.announcement_info
+ .as_ref()
+ .map(|info| info.addresses())
+ // An empty address list must not count as vacuously "all Tor".
+ .and_then(|addresses| (!addresses.is_empty()).then(|| addresses))
+ .map(|addresses| addresses.iter().all(|address| address.is_tor()))
+ .unwrap_or(false)
+ }
+}
+
impl fmt::Display for NodeInfo {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
write!(f, " channels: {:?}, announcement_info: {:?}",
channels: RwLock::new(channels),
nodes: RwLock::new(nodes),
last_rapid_gossip_sync_timestamp: Mutex::new(last_rapid_gossip_sync_timestamp),
- removed_nodes: Mutex::new(HashMap::new()),
- removed_channels: Mutex::new(HashMap::new()),
+ removed_nodes: Mutex::new(new_hash_map()),
+ removed_channels: Mutex::new(new_hash_map()),
pending_checks: utxo::PendingChecks::new(),
})
}
channels: RwLock::new(IndexedMap::new()),
nodes: RwLock::new(IndexedMap::new()),
last_rapid_gossip_sync_timestamp: Mutex::new(None),
- removed_channels: Mutex::new(HashMap::new()),
- removed_nodes: Mutex::new(HashMap::new()),
+ removed_channels: Mutex::new(new_hash_map()),
+ removed_nodes: Mutex::new(new_hash_map()),
pending_checks: utxo::PendingChecks::new(),
}
}
// NOTE: In the case of no-std, we won't have access to the current UNIX time at the time of removal,
// so we'll just set the removal time here to the current UNIX time on the very next invocation
// of this function.
- #[cfg(feature = "no-std")]
+ #[cfg(not(feature = "std"))]
{
let mut tracked_time = Some(current_time_unix);
core::mem::swap(time, &mut tracked_time);
None => {
core::mem::drop(channels);
self.pending_checks.check_hold_pending_channel_update(msg, full_msg)?;
- return Err(LightningError{err: "Couldn't find channel for update".to_owned(), action: ErrorAction::IgnoreError});
+ return Err(LightningError {
+ err: "Couldn't find channel for update".to_owned(),
+ action: ErrorAction::IgnoreAndLog(Level::Gossip),
+ });
},
Some(channel) => {
if msg.htlc_maximum_msat > MAX_VALUE_MSAT {
use crate::ln::chan_utils::make_funding_redeemscript;
#[cfg(feature = "std")]
use crate::ln::features::InitFeatures;
+ use crate::ln::msgs::SocketAddress;
use crate::routing::gossip::{P2PGossipSync, NetworkGraph, NetworkUpdate, NodeAlias, MAX_EXCESS_BYTES_FOR_RELAY, NodeId, RoutingFees, ChannelUpdateInfo, ChannelInfo, NodeAnnouncementInfo, NodeInfo};
use crate::routing::utxo::{UtxoLookupError, UtxoResult};
use crate::ln::msgs::{RoutingMessageHandler, UnsignedNodeAnnouncement, NodeAnnouncement,
ReplyChannelRange, QueryChannelRange, QueryShortChannelIds, MAX_VALUE_MSAT};
use crate::util::config::UserConfig;
use crate::util::test_utils;
- use crate::util::ser::{ReadableArgs, Readable, Writeable};
+ use crate::util::ser::{Hostname, ReadableArgs, Readable, Writeable};
use crate::util::scid_utils::scid_from_parts;
use crate::routing::gossip::REMOVED_ENTRIES_TRACKING_AGE_LIMIT_SECS;
let node_id = NodeId([42; 33]);
assert_eq!(format!("{}", &node_id), "2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a");
}
+
+ #[test]
+ fn is_tor_only_node() {
+ let network_graph = create_network_graph();
+ let (secp_ctx, gossip_sync) = create_gossip_sync(&network_graph);
+
+ let node_1_privkey = &SecretKey::from_slice(&[42; 32]).unwrap();
+ let node_2_privkey = &SecretKey::from_slice(&[41; 32]).unwrap();
+ let node_1_id = NodeId::from_pubkey(&PublicKey::from_secret_key(&secp_ctx, node_1_privkey));
+
+ let announcement = get_signed_channel_announcement(|_| {}, node_1_privkey, node_2_privkey, &secp_ctx);
+ gossip_sync.handle_channel_announcement(&announcement).unwrap();
+
+ // One address of each flavor; the onion variants are the Tor addresses.
+ let tcp_ip_v4 = SocketAddress::TcpIpV4 {
+ addr: [255, 254, 253, 252],
+ port: 9735
+ };
+ let tcp_ip_v6 = SocketAddress::TcpIpV6 {
+ addr: [255, 254, 253, 252, 251, 250, 249, 248, 247, 246, 245, 244, 243, 242, 241, 240],
+ port: 9735
+ };
+ let onion_v2 = SocketAddress::OnionV2([255, 254, 253, 252, 251, 250, 249, 248, 247, 246, 38, 7]);
+ let onion_v3 = SocketAddress::OnionV3 {
+ ed25519_pubkey: [255, 254, 253, 252, 251, 250, 249, 248, 247, 246, 245, 244, 243, 242, 241, 240, 239, 238, 237, 236, 235, 234, 233, 232, 231, 230, 229, 228, 227, 226, 225, 224],
+ checksum: 32,
+ version: 16,
+ port: 9735
+ };
+ let hostname = SocketAddress::Hostname {
+ hostname: Hostname::try_from(String::from("host")).unwrap(),
+ port: 9735,
+ };
+
+ // No node announcement seen yet: not Tor-only.
+ assert!(!network_graph.read_only().node(&node_1_id).unwrap().is_tor_only());
+
+ // Announcement without any addresses: still not Tor-only.
+ let announcement = get_signed_node_announcement(|_| {}, node_1_privkey, &secp_ctx);
+ gossip_sync.handle_node_announcement(&announcement).unwrap();
+ assert!(!network_graph.read_only().node(&node_1_id).unwrap().is_tor_only());
+
+ // Mixed clearnet and Tor addresses (in decreasing clearnet counts below): not Tor-only.
+ let announcement = get_signed_node_announcement(
+ |announcement| {
+ announcement.addresses = vec![
+ tcp_ip_v4.clone(), tcp_ip_v6.clone(), onion_v2.clone(), onion_v3.clone(),
+ hostname.clone()
+ ];
+ announcement.timestamp += 1000;
+ },
+ node_1_privkey, &secp_ctx
+ );
+ gossip_sync.handle_node_announcement(&announcement).unwrap();
+ assert!(!network_graph.read_only().node(&node_1_id).unwrap().is_tor_only());
+
+ let announcement = get_signed_node_announcement(
+ |announcement| {
+ announcement.addresses = vec![
+ tcp_ip_v4.clone(), tcp_ip_v6.clone(), onion_v2.clone(), onion_v3.clone()
+ ];
+ announcement.timestamp += 2000;
+ },
+ node_1_privkey, &secp_ctx
+ );
+ gossip_sync.handle_node_announcement(&announcement).unwrap();
+ assert!(!network_graph.read_only().node(&node_1_id).unwrap().is_tor_only());
+
+ let announcement = get_signed_node_announcement(
+ |announcement| {
+ announcement.addresses = vec![
+ tcp_ip_v6.clone(), onion_v2.clone(), onion_v3.clone()
+ ];
+ announcement.timestamp += 3000;
+ },
+ node_1_privkey, &secp_ctx
+ );
+ gossip_sync.handle_node_announcement(&announcement).unwrap();
+ assert!(!network_graph.read_only().node(&node_1_id).unwrap().is_tor_only());
+
+ // Only Tor (onion) addresses remain: now Tor-only.
+ let announcement = get_signed_node_announcement(
+ |announcement| {
+ announcement.addresses = vec![onion_v2.clone(), onion_v3.clone()];
+ announcement.timestamp += 4000;
+ },
+ node_1_privkey, &secp_ctx
+ );
+ gossip_sync.handle_node_announcement(&announcement).unwrap();
+ assert!(network_graph.read_only().node(&node_1_id).unwrap().is_tor_only());
+
+ let announcement = get_signed_node_announcement(
+ |announcement| {
+ announcement.addresses = vec![onion_v2.clone()];
+ announcement.timestamp += 5000;
+ },
+ node_1_privkey, &secp_ctx
+ );
+ gossip_sync.handle_node_announcement(&announcement).unwrap();
+ assert!(network_graph.read_only().node(&node_1_id).unwrap().is_tor_only());
+
+ // A clearnet address reappears: no longer Tor-only.
+ let announcement = get_signed_node_announcement(
+ |announcement| {
+ announcement.addresses = vec![tcp_ip_v4.clone()];
+ announcement.timestamp += 6000;
+ },
+ node_1_privkey, &secp_ctx
+ );
+ gossip_sync.handle_node_announcement(&announcement).unwrap();
+ assert!(!network_graph.read_only().node(&node_1_id).unwrap().is_tor_only());
+ }
}
#[cfg(ldk_bench)]
pub mod router;
pub mod scoring;
#[cfg(test)]
-mod test_utils;
+// NOTE(review): widened to `pub(crate)` visibility — presumably so tests elsewhere in the crate
+// can reuse these helpers; confirm against the new callers.
+pub(crate) mod test_utils;
//! The router finds paths within a [`NetworkGraph`] for a payment.
use bitcoin::secp256k1::{PublicKey, Secp256k1, self};
-use bitcoin::hashes::Hash;
-use bitcoin::hashes::sha256::Hash as Sha256;
-use crate::blinded_path::{BlindedHop, BlindedPath};
+use crate::blinded_path::{BlindedHop, BlindedPath, Direction, IntroductionNode};
use crate::blinded_path::payment::{ForwardNode, ForwardTlvs, PaymentConstraints, PaymentRelay, ReceiveTlvs};
use crate::ln::PaymentHash;
-use crate::ln::channelmanager::{ChannelDetails, PaymentId};
+use crate::ln::channelmanager::{ChannelDetails, PaymentId, MIN_FINAL_CLTV_EXPIRY_DELTA};
use crate::ln::features::{BlindedHopFeatures, Bolt11InvoiceFeatures, Bolt12InvoiceFeatures, ChannelFeatures, NodeFeatures};
use crate::ln::msgs::{DecodeError, ErrorAction, LightningError, MAX_VALUE_MSAT};
use crate::offers::invoice::{BlindedPayInfo, Bolt12Invoice};
use crate::io;
use crate::prelude::*;
-use crate::sync::Mutex;
use alloc::collections::BinaryHeap;
use core::{cmp, fmt};
use core::ops::Deref;
/// A [`Router`] implemented using [`find_route`].
-pub struct DefaultRouter<G: Deref<Target = NetworkGraph<L>> + Clone, L: Deref, S: Deref, SP: Sized, Sc: ScoreLookUp<ScoreParams = SP>> where
+pub struct DefaultRouter<G: Deref<Target = NetworkGraph<L>> + Clone, L: Deref, ES: Deref, S: Deref, SP: Sized, Sc: ScoreLookUp<ScoreParams = SP>> where
L::Target: Logger,
S::Target: for <'a> LockableScore<'a, ScoreLookUp = Sc>,
+ ES::Target: EntropySource,
{
network_graph: G,
logger: L,
- random_seed_bytes: Mutex<[u8; 32]>,
+ // Randomness for route randomization; also cloned into `message_router` at construction.
+ entropy_source: ES,
scorer: S,
score_params: SP,
- message_router: DefaultMessageRouter<G, L>,
+ message_router: DefaultMessageRouter<G, L, ES>,
}
-impl<G: Deref<Target = NetworkGraph<L>> + Clone, L: Deref, S: Deref, SP: Sized, Sc: ScoreLookUp<ScoreParams = SP>> DefaultRouter<G, L, S, SP, Sc> where
+impl<G: Deref<Target = NetworkGraph<L>> + Clone, L: Deref, ES: Deref + Clone, S: Deref, SP: Sized, Sc: ScoreLookUp<ScoreParams = SP>> DefaultRouter<G, L, ES, S, SP, Sc> where
L::Target: Logger,
S::Target: for <'a> LockableScore<'a, ScoreLookUp = Sc>,
+ ES::Target: EntropySource,
{
/// Creates a new router.
+///
+/// The entropy source is used for route randomization and is shared (via clone) with the inner
+/// [`DefaultMessageRouter`] used for blinded-path construction.
- pub fn new(network_graph: G, logger: L, random_seed_bytes: [u8; 32], scorer: S, score_params: SP) -> Self {
- let random_seed_bytes = Mutex::new(random_seed_bytes);
- let message_router = DefaultMessageRouter::new(network_graph.clone());
- Self { network_graph, logger, random_seed_bytes, scorer, score_params, message_router }
+ pub fn new(network_graph: G, logger: L, entropy_source: ES, scorer: S, score_params: SP) -> Self {
+ let message_router = DefaultMessageRouter::new(network_graph.clone(), entropy_source.clone());
+ Self { network_graph, logger, entropy_source, scorer, score_params, message_router }
}
}
}
-impl<G: Deref<Target = NetworkGraph<L>> + Clone, L: Deref, S: Deref, SP: Sized, Sc: ScoreLookUp<ScoreParams = SP>> Router for DefaultRouter<G, L, S, SP, Sc> where
+impl<G: Deref<Target = NetworkGraph<L>> + Clone, L: Deref, ES: Deref, S: Deref, SP: Sized, Sc: ScoreLookUp<ScoreParams = SP>> Router for DefaultRouter<G, L, ES, S, SP, Sc> where
L::Target: Logger,
S::Target: for <'a> LockableScore<'a, ScoreLookUp = Sc>,
+ ES::Target: EntropySource,
{
fn find_route(
&self,
first_hops: Option<&[&ChannelDetails]>,
inflight_htlcs: InFlightHtlcs
) -> Result<Route, LightningError> {
- let random_seed_bytes = {
- let mut locked_random_seed_bytes = self.random_seed_bytes.lock().unwrap();
- *locked_random_seed_bytes = Sha256::hash(&*locked_random_seed_bytes).to_byte_array();
- *locked_random_seed_bytes
- };
+ let random_seed_bytes = self.entropy_source.get_secure_random_bytes();
find_route(
payer, params, &self.network_graph, first_hops, &*self.logger,
&ScorerAccountingForInFlightHtlcs::new(self.scorer.read_lock(), &inflight_htlcs),
}
fn create_blinded_payment_paths<
- ES: EntropySource + ?Sized, T: secp256k1::Signing + secp256k1::Verification
- >(
+ T: secp256k1::Signing + secp256k1::Verification
+ > (
&self, recipient: PublicKey, first_hops: Vec<ChannelDetails>, tlvs: ReceiveTlvs,
- amount_msats: u64, entropy_source: &ES, secp_ctx: &Secp256k1<T>
+ amount_msats: u64, secp_ctx: &Secp256k1<T>
) -> Result<Vec<(BlindedPayInfo, BlindedPath)>, ()> {
// Limit the number of blinded paths that are computed.
const MAX_PAYMENT_PATHS: usize = 3;
})
.map(|forward_node| {
BlindedPath::new_for_payment(
- &[forward_node], recipient, tlvs.clone(), u64::MAX, entropy_source, secp_ctx
+ &[forward_node], recipient, tlvs.clone(), u64::MAX, MIN_FINAL_CLTV_EXPIRY_DELTA,
+ &*self.entropy_source, secp_ctx
)
})
.take(MAX_PAYMENT_PATHS)
Ok(paths) if !paths.is_empty() => Ok(paths),
_ => {
if network_graph.nodes().contains_key(&NodeId::from_pubkey(&recipient)) {
- BlindedPath::one_hop_for_payment(recipient, tlvs, entropy_source, secp_ctx)
- .map(|path| vec![path])
+ BlindedPath::one_hop_for_payment(
+ recipient, tlvs, MIN_FINAL_CLTV_EXPIRY_DELTA, &*self.entropy_source, secp_ctx
+ ).map(|path| vec![path])
} else {
Err(())
}
}
}
-impl< G: Deref<Target = NetworkGraph<L>> + Clone, L: Deref, S: Deref, SP: Sized, Sc: ScoreLookUp<ScoreParams = SP>> MessageRouter for DefaultRouter<G, L, S, SP, Sc> where
+impl< G: Deref<Target = NetworkGraph<L>> + Clone, L: Deref, ES: Deref, S: Deref, SP: Sized, Sc: ScoreLookUp<ScoreParams = SP>> MessageRouter for DefaultRouter<G, L, ES, S, SP, Sc> where
L::Target: Logger,
S::Target: for <'a> LockableScore<'a, ScoreLookUp = Sc>,
+ ES::Target: EntropySource,
{
fn find_path(
&self, sender: PublicKey, peers: Vec<PublicKey>, destination: Destination
}
fn create_blinded_paths<
- ES: EntropySource + ?Sized, T: secp256k1::Signing + secp256k1::Verification
- >(
- &self, recipient: PublicKey, peers: Vec<PublicKey>, entropy_source: &ES,
- secp_ctx: &Secp256k1<T>
+ T: secp256k1::Signing + secp256k1::Verification
+ > (
+ &self, recipient: PublicKey, peers: Vec<PublicKey>, secp_ctx: &Secp256k1<T>,
) -> Result<Vec<BlindedPath>, ()> {
- self.message_router.create_blinded_paths(recipient, peers, entropy_source, secp_ctx)
+ self.message_router.create_blinded_paths(recipient, peers, secp_ctx)
}
}
/// are assumed to be with the `recipient`'s peers. The payment secret and any constraints are
/// given in `tlvs`.
fn create_blinded_payment_paths<
- ES: EntropySource + ?Sized, T: secp256k1::Signing + secp256k1::Verification
- >(
+ T: secp256k1::Signing + secp256k1::Verification
+ > (
&self, recipient: PublicKey, first_hops: Vec<ChannelDetails>, tlvs: ReceiveTlvs,
- amount_msats: u64, entropy_source: &ES, secp_ctx: &Secp256k1<T>
+ amount_msats: u64, secp_ctx: &Secp256k1<T>
) -> Result<Vec<(BlindedPayInfo, BlindedPath)>, ()>;
}
impl InFlightHtlcs {
/// Constructs an empty `InFlightHtlcs`.
- pub fn new() -> Self { InFlightHtlcs(HashMap::new()) }
+ pub fn new() -> Self { InFlightHtlcs(new_hash_map()) }
/// Takes in a path with payer's node id and adds the path's details to `InFlightHtlcs`.
pub fn process_path(&mut self, path: &Path, payer_node_id: PublicKey) {
write_ver_prefix!(writer, SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION);
(self.paths.len() as u64).write(writer)?;
let mut blinded_tails = Vec::new();
- for path in self.paths.iter() {
+ for (idx, path) in self.paths.iter().enumerate() {
(path.hops.len() as u8).write(writer)?;
- for (idx, hop) in path.hops.iter().enumerate() {
+ for hop in path.hops.iter() {
hop.write(writer)?;
- if let Some(blinded_tail) = &path.blinded_tail {
- if blinded_tails.is_empty() {
- blinded_tails = Vec::with_capacity(path.hops.len());
- for _ in 0..idx {
- blinded_tails.push(None);
- }
- }
- blinded_tails.push(Some(blinded_tail));
- } else if !blinded_tails.is_empty() { blinded_tails.push(None); }
}
+ if let Some(blinded_tail) = &path.blinded_tail {
+ if blinded_tails.is_empty() {
+ blinded_tails = Vec::with_capacity(path.hops.len());
+ for _ in 0..idx {
+ blinded_tails.push(None);
+ }
+ }
+ blinded_tails.push(Some(blinded_tail));
+ } else if !blinded_tails.is_empty() { blinded_tails.push(None); }
}
write_tlv_fields!(writer, {
// For compatibility with LDK versions prior to 0.0.117, we take the individual
(1, self.route_params.as_ref().map(|p| &p.payment_params), option),
(2, blinded_tails, optional_vec),
(3, self.route_params.as_ref().map(|p| p.final_value_msat), option),
- (5, self.route_params.as_ref().map(|p| p.max_total_routing_fee_msat), option),
+ (5, self.route_params.as_ref().and_then(|p| p.max_total_routing_fee_msat), option),
});
Ok(())
}
_ => None,
}
}
- fn blinded_route_hints(&self) -> &[(BlindedPayInfo, BlindedPath)] {
+ pub(crate) fn blinded_route_hints(&self) -> &[(BlindedPayInfo, BlindedPath)] {
match self {
Self::Blinded { route_hints, .. } => &route_hints[..],
Self::Clear { .. } => &[]
/// has been funded and is able to pay), and accessor methods may panic otherwise.
///
/// [`find_route`] validates this prior to constructing a [`CandidateRouteHop`].
+ ///
+ /// This is not exported to bindings users as lifetimes are not expressible in most languages.
pub details: &'a ChannelDetails,
/// The node id of the payer, which is also the source side of this candidate route hop.
+ ///
+ /// This is not exported to bindings users as lifetimes are not expressible in most languages.
pub payer_node_id: &'a NodeId,
}
pub struct PublicHopCandidate<'a> {
/// Information about the channel, including potentially its capacity and
/// direction-specific information.
+ ///
+ /// This is not exported to bindings users as lifetimes are not expressible in most languages.
pub info: DirectedChannelInfo<'a>,
/// The short channel ID of the channel, i.e. the identifier by which we refer to this
/// channel.
#[derive(Clone, Debug)]
pub struct PrivateHopCandidate<'a> {
/// Information about the private hop communicated via BOLT 11.
+ ///
+ /// This is not exported to bindings users as lifetimes are not expressible in most languages.
pub hint: &'a RouteHintHop,
/// Node id of the next hop in BOLT 11 route hint.
+ ///
+ /// This is not exported to bindings users as lifetimes are not expressible in most languages.
pub target_node_id: &'a NodeId
}
/// A [`CandidateRouteHop::Blinded`] entry.
#[derive(Clone, Debug)]
pub struct BlindedPathCandidate<'a> {
+ /// The node id of the introduction node, resolved from either the [`NetworkGraph`] or first
+ /// hops.
+ ///
+ /// This is not exported to bindings users as lifetimes are not expressible in most languages.
+ pub source_node_id: &'a NodeId,
/// Information about the blinded path including the fee, HTLC amount limits, and
/// cryptographic material required to build an HTLC through the given path.
+ ///
+ /// This is not exported to bindings users as lifetimes are not expressible in most languages.
pub hint: &'a (BlindedPayInfo, BlindedPath),
/// Index of the hint in the original list of blinded hints.
///
/// A [`CandidateRouteHop::OneHopBlinded`] entry.
#[derive(Clone, Debug)]
pub struct OneHopBlindedPathCandidate<'a> {
+ /// The node id of the introduction node, resolved from either the [`NetworkGraph`] or first
+ /// hops.
+ ///
+ /// This is not exported to bindings users as lifetimes are not expressible in most languages.
+ pub source_node_id: &'a NodeId,
/// Information about the blinded path including the fee, HTLC amount limits, and
/// cryptographic material required to build an HTLC terminating with the given path.
///
/// Note that the [`BlindedPayInfo`] is ignored here.
+ ///
+ /// This is not exported to bindings users as lifetimes are not expressible in most languages.
pub hint: &'a (BlindedPayInfo, BlindedPath),
/// Index of the hint in the original list of blinded hints.
///
CandidateRouteHop::FirstHop(hop) => *hop.payer_node_id,
CandidateRouteHop::PublicHop(hop) => *hop.info.source(),
CandidateRouteHop::PrivateHop(hop) => hop.hint.src_node_id.into(),
- CandidateRouteHop::Blinded(hop) => hop.hint.1.introduction_node_id.into(),
- CandidateRouteHop::OneHopBlinded(hop) => hop.hint.1.introduction_node_id.into(),
+ CandidateRouteHop::Blinded(hop) => *hop.source_node_id,
+ CandidateRouteHop::OneHopBlinded(hop) => *hop.source_node_id,
}
}
/// Returns the target node id of this hop, if known.
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self.0 {
CandidateRouteHop::Blinded(BlindedPathCandidate { hint, .. }) | CandidateRouteHop::OneHopBlinded(OneHopBlindedPathCandidate { hint, .. }) => {
- "blinded route hint with introduction node id ".fmt(f)?;
- hint.1.introduction_node_id.fmt(f)?;
+ "blinded route hint with introduction node ".fmt(f)?;
+ match &hint.1.introduction_node {
+ IntroductionNode::NodeId(pubkey) => write!(f, "id {}", pubkey)?,
+ IntroductionNode::DirectedShortChannelId(direction, scid) => {
+ match direction {
+ Direction::NodeOne => {
+ write!(f, "one on channel with SCID {}", scid)?;
+ },
+ Direction::NodeTwo => {
+ write!(f, "two on channel with SCID {}", scid)?;
+ },
+ }
+ }
+ }
" and blinding point ".fmt(f)?;
hint.1.blinding_point.fmt(f)
},
return Err(LightningError{err: "Cannot send a payment of 0 msat".to_owned(), action: ErrorAction::IgnoreError});
}
+ let introduction_node_id_cache = payment_params.payee.blinded_route_hints().iter()
+ .map(|(_, path)| path.public_introduction_node_id(network_graph))
+ .collect::<Vec<_>>();
match &payment_params.payee {
Payee::Clear { route_hints, node_id, .. } => {
for route in route_hints.iter() {
}
},
Payee::Blinded { route_hints, .. } => {
- if route_hints.iter().all(|(_, path)| &path.introduction_node_id == our_node_pubkey) {
+ if introduction_node_id_cache.iter().all(|introduction_node_id| *introduction_node_id == Some(&our_node_id)) {
return Err(LightningError{err: "Cannot generate a route to blinded paths if we are the introduction node to all of them".to_owned(), action: ErrorAction::IgnoreError});
}
- for (_, blinded_path) in route_hints.iter() {
+ for ((_, blinded_path), introduction_node_id) in route_hints.iter().zip(introduction_node_id_cache.iter()) {
if blinded_path.blinded_hops.len() == 0 {
return Err(LightningError{err: "0-hop blinded path provided".to_owned(), action: ErrorAction::IgnoreError});
- } else if &blinded_path.introduction_node_id == our_node_pubkey {
+ } else if *introduction_node_id == Some(&our_node_id) {
log_info!(logger, "Got blinded path with ourselves as the introduction node, ignoring");
} else if blinded_path.blinded_hops.len() == 1 &&
- route_hints.iter().any( |(_, p)| p.blinded_hops.len() == 1
- && p.introduction_node_id != blinded_path.introduction_node_id)
+ route_hints
+ .iter().zip(introduction_node_id_cache.iter())
+ .filter(|((_, p), _)| p.blinded_hops.len() == 1)
+ .any(|(_, p_introduction_node_id)| p_introduction_node_id != introduction_node_id)
{
return Err(LightningError{err: format!("1-hop blinded paths must all have matching introduction node ids"), action: ErrorAction::IgnoreError});
}
// inserting first hops suggested by the caller as targets.
// Our search will then attempt to reach them while traversing from the payee node.
let mut first_hop_targets: HashMap<_, Vec<&ChannelDetails>> =
- HashMap::with_capacity(if first_hops.is_some() { first_hops.as_ref().unwrap().len() } else { 0 });
+ hash_map_with_capacity(if first_hops.is_some() { first_hops.as_ref().unwrap().len() } else { 0 });
if let Some(hops) = first_hops {
for chan in hops {
if chan.get_outbound_payment_scid().is_none() {
}
}
- let mut private_hop_key_cache = HashMap::with_capacity(
+ let mut private_hop_key_cache = hash_map_with_capacity(
payment_params.payee.unblinded_route_hints().iter().map(|path| path.0.len()).sum()
);
// Map from node_id to information about the best current path to that node, including feerate
// information.
- let mut dist: HashMap<NodeId, PathBuildingHop> = HashMap::with_capacity(network_nodes.len());
+ let mut dist: HashMap<NodeId, PathBuildingHop> = hash_map_with_capacity(network_nodes.len());
// During routing, if we ignore a path due to an htlc_minimum_msat limit, we set this,
// indicating that we may wish to try again with a higher value, potentially paying to meet an
// is used. Hence, liquidity used in one direction will not offset any used in the opposite
// direction.
let mut used_liquidities: HashMap<CandidateHopId, u64> =
- HashMap::with_capacity(network_nodes.len());
+ hash_map_with_capacity(network_nodes.len());
// Keeping track of how much value we already collected across other paths. Helps to decide
// when we want to stop looking for new paths.
// earlier than general path finding, they will be somewhat prioritized, although currently
// it matters only if the fees are exactly the same.
for (hint_idx, hint) in payment_params.payee.blinded_route_hints().iter().enumerate() {
- let intro_node_id = NodeId::from_pubkey(&hint.1.introduction_node_id);
- let have_intro_node_in_graph =
- // Only add the hops in this route to our candidate set if either
- // we have a direct channel to the first hop or the first hop is
- // in the regular network graph.
- first_hop_targets.get(&intro_node_id).is_some() ||
- network_nodes.get(&intro_node_id).is_some();
- if !have_intro_node_in_graph || our_node_id == intro_node_id { continue }
+ // Only add the hops in this route to our candidate set if either
+ // we have a direct channel to the first hop or the first hop is
+ // in the regular network graph.
+ let source_node_id = match introduction_node_id_cache[hint_idx] {
+ Some(node_id) => node_id,
+ None => match &hint.1.introduction_node {
+ IntroductionNode::NodeId(pubkey) => {
+ let node_id = NodeId::from_pubkey(&pubkey);
+ match first_hop_targets.get_key_value(&node_id).map(|(key, _)| key) {
+ Some(node_id) => node_id,
+ None => continue,
+ }
+ },
+ IntroductionNode::DirectedShortChannelId(direction, scid) => {
+ let first_hop = first_hop_targets.iter().find(|(_, channels)|
+ channels
+ .iter()
+ .any(|details| Some(*scid) == details.get_outbound_payment_scid())
+ );
+ match first_hop {
+ Some((counterparty_node_id, _)) => {
+ direction.select_node_id(&our_node_id, counterparty_node_id)
+ },
+ None => continue,
+ }
+ },
+ },
+ };
+ if our_node_id == *source_node_id { continue }
let candidate = if hint.1.blinded_hops.len() == 1 {
- CandidateRouteHop::OneHopBlinded(OneHopBlindedPathCandidate { hint, hint_idx })
- } else { CandidateRouteHop::Blinded(BlindedPathCandidate { hint, hint_idx }) };
+ CandidateRouteHop::OneHopBlinded(
+ OneHopBlindedPathCandidate { source_node_id, hint, hint_idx }
+ )
+ } else {
+ CandidateRouteHop::Blinded(BlindedPathCandidate { source_node_id, hint, hint_idx })
+ };
let mut path_contribution_msat = path_value_msat;
if let Some(hop_used_msat) = add_entry!(&candidate,
0, path_contribution_msat, 0, 0_u64, 0, 0)
{
path_contribution_msat = hop_used_msat;
} else { continue }
- if let Some(first_channels) = first_hop_targets.get_mut(&NodeId::from_pubkey(&hint.1.introduction_node_id)) {
- sort_first_hop_channels(first_channels, &used_liquidities, recommended_value_msat,
- our_node_pubkey);
+ if let Some(first_channels) = first_hop_targets.get(source_node_id) {
+ let mut first_channels = first_channels.clone();
+ sort_first_hop_channels(
+ &mut first_channels, &used_liquidities, recommended_value_msat, our_node_pubkey
+ );
for details in first_channels {
let first_hop_candidate = CandidateRouteHop::FirstHop(FirstHopCandidate {
details, payer_node_id: &our_node_id,
let mut aggregate_path_contribution_msat = path_value_msat;
for (idx, (hop, prev_hop_id)) in hop_iter.zip(prev_hop_iter).enumerate() {
- let target = private_hop_key_cache.get(&prev_hop_id).unwrap();
+ let target = private_hop_key_cache.get(prev_hop_id).unwrap();
- if let Some(first_channels) = first_hop_targets.get(&target) {
+ if let Some(first_channels) = first_hop_targets.get(target) {
if first_channels.iter().any(|d| d.outbound_scid_alias == Some(hop.short_channel_id)) {
log_trace!(logger, "Ignoring route hint with SCID {} (and any previous) due to it being a direct channel of ours.",
hop.short_channel_id);
let candidate = network_channels
.get(&hop.short_channel_id)
- .and_then(|channel| channel.as_directed_to(&target))
+ .and_then(|channel| channel.as_directed_to(target))
.map(|(info, _)| CandidateRouteHop::PublicHop(PublicHopCandidate {
info,
short_channel_id: hop.short_channel_id,
.saturating_add(1);
// Searching for a direct channel between last checked hop and first_hop_targets
- if let Some(first_channels) = first_hop_targets.get_mut(&target) {
- sort_first_hop_channels(first_channels, &used_liquidities,
- recommended_value_msat, our_node_pubkey);
+ if let Some(first_channels) = first_hop_targets.get(target) {
+ let mut first_channels = first_channels.clone();
+ sort_first_hop_channels(
+ &mut first_channels, &used_liquidities, recommended_value_msat, our_node_pubkey
+ );
for details in first_channels {
let first_hop_candidate = CandidateRouteHop::FirstHop(FirstHopCandidate {
details, payer_node_id: &our_node_id,
// Note that we *must* check if the last hop was added as `add_entry`
// always assumes that the third argument is a node to which we have a
// path.
- if let Some(first_channels) = first_hop_targets.get_mut(&NodeId::from_pubkey(&hop.src_node_id)) {
- sort_first_hop_channels(first_channels, &used_liquidities,
- recommended_value_msat, our_node_pubkey);
+ if let Some(first_channels) = first_hop_targets.get(&NodeId::from_pubkey(&hop.src_node_id)) {
+ let mut first_channels = first_channels.clone();
+ sort_first_hop_channels(
+ &mut first_channels, &used_liquidities, recommended_value_msat, our_node_pubkey
+ );
for details in first_channels {
let first_hop_candidate = CandidateRouteHop::FirstHop(FirstHopCandidate {
details, payer_node_id: &our_node_id,
#[cfg(test)]
mod tests {
- use crate::blinded_path::{BlindedHop, BlindedPath};
+ use crate::blinded_path::{BlindedHop, BlindedPath, IntroductionNode};
use crate::routing::gossip::{NetworkGraph, P2PGossipSync, NodeId, EffectiveCapacity};
use crate::routing::utxo::UtxoResult;
use crate::routing::router::{get_route, build_route_from_hops_internal, add_random_cltv_offset, default_node_features,
use crate::prelude::*;
use crate::sync::Arc;
- use core::convert::TryInto;
-
fn get_channel_details(short_channel_id: Option<u64>, node_id: PublicKey,
features: InitFeatures, outbound_capacity_msat: u64) -> channelmanager::ChannelDetails {
channelmanager::ChannelDetails {
config: None,
feerate_sat_per_1000_weight: None,
channel_shutdown_state: Some(channelmanager::ChannelShutdownState::NotShuttingDown),
+ pending_inbound_htlcs: Vec::new(),
+ pending_outbound_htlcs: Vec::new(),
}
}
// MPP to a 1-hop blinded path for nodes[2]
let bolt12_features = channelmanager::provided_bolt12_invoice_features(&config);
let blinded_path = BlindedPath {
- introduction_node_id: nodes[2],
+ introduction_node: IntroductionNode::NodeId(nodes[2]),
blinding_point: ln_test_utils::pubkey(42),
blinded_hops: vec![BlindedHop { blinded_node_id: ln_test_utils::pubkey(42 as u8), encrypted_payload: Vec::new() }],
};
// MPP to 3 2-hop blinded paths
let mut blinded_path_node_0 = blinded_path.clone();
- blinded_path_node_0.introduction_node_id = nodes[0];
+ blinded_path_node_0.introduction_node = IntroductionNode::NodeId(nodes[0]);
blinded_path_node_0.blinded_hops.push(blinded_path.blinded_hops[0].clone());
let mut node_0_payinfo = blinded_payinfo.clone();
node_0_payinfo.htlc_maximum_msat = 50_000;
let mut blinded_path_node_7 = blinded_path_node_0.clone();
- blinded_path_node_7.introduction_node_id = nodes[7];
+ blinded_path_node_7.introduction_node = IntroductionNode::NodeId(nodes[7]);
let mut node_7_payinfo = blinded_payinfo.clone();
node_7_payinfo.htlc_maximum_msat = 60_000;
let mut blinded_path_node_1 = blinded_path_node_0.clone();
- blinded_path_node_1.introduction_node_id = nodes[1];
+ blinded_path_node_1.introduction_node = IntroductionNode::NodeId(nodes[1]);
let mut node_1_payinfo = blinded_payinfo.clone();
node_1_payinfo.htlc_maximum_msat = 180_000;
if let Some(bt) = &path.blinded_tail {
assert_eq!(path.hops.len() + if bt.hops.len() == 1 { 0 } else { 1 }, 2);
if bt.hops.len() > 1 {
- assert_eq!(path.hops.last().unwrap().pubkey,
+ let network_graph = network_graph.read_only();
+ assert_eq!(
+ NodeId::from_pubkey(&path.hops.last().unwrap().pubkey),
payment_params.payee.blinded_route_hints().iter()
.find(|(p, _)| p.htlc_maximum_msat == path.final_value_msat())
- .map(|(_, p)| p.introduction_node_id).unwrap());
+ .and_then(|(_, p)| p.public_introduction_node_id(&network_graph))
+ .copied()
+ .unwrap()
+ );
} else {
assert_eq!(path.hops.last().unwrap().pubkey, nodes[2]);
}
(route.paths[1].hops[1].short_channel_id == 4 && route.paths[0].hops[1].short_channel_id == 13));
}
- #[cfg(not(feature = "no-std"))]
+ #[cfg(feature = "std")]
pub(super) fn random_init_seed() -> u64 {
// Because the default HashMap in std pulls OS randomness, we can use it as a (bad) RNG.
use core::hash::{BuildHasher, Hasher};
}
#[test]
- #[cfg(not(feature = "no-std"))]
+ #[cfg(feature = "std")]
fn generate_routes() {
use crate::routing::scoring::{ProbabilisticScorer, ProbabilisticScoringFeeParameters};
}
#[test]
- #[cfg(not(feature = "no-std"))]
+ #[cfg(feature = "std")]
fn generate_routes_mpp() {
use crate::routing::scoring::{ProbabilisticScorer, ProbabilisticScoringFeeParameters};
}
#[test]
- #[cfg(not(feature = "no-std"))]
+ #[cfg(feature = "std")]
fn generate_large_mpp_routes() {
use crate::routing::scoring::{ProbabilisticScorer, ProbabilisticScoringFeeParameters};
// Make sure this works for blinded route hints.
let blinded_path = BlindedPath {
- introduction_node_id: intermed_node_id,
+ introduction_node: IntroductionNode::NodeId(intermed_node_id),
blinding_point: ln_test_utils::pubkey(42),
blinded_hops: vec![
BlindedHop { blinded_node_id: ln_test_utils::pubkey(42), encrypted_payload: vec![] },
#[test]
fn blinded_route_ser() {
let blinded_path_1 = BlindedPath {
- introduction_node_id: ln_test_utils::pubkey(42),
+ introduction_node: IntroductionNode::NodeId(ln_test_utils::pubkey(42)),
blinding_point: ln_test_utils::pubkey(43),
blinded_hops: vec![
BlindedHop { blinded_node_id: ln_test_utils::pubkey(44), encrypted_payload: Vec::new() },
],
};
let blinded_path_2 = BlindedPath {
- introduction_node_id: ln_test_utils::pubkey(46),
+ introduction_node: IntroductionNode::NodeId(ln_test_utils::pubkey(46)),
blinding_point: ln_test_utils::pubkey(47),
blinded_hops: vec![
BlindedHop { blinded_node_id: ln_test_utils::pubkey(48), encrypted_payload: Vec::new() },
// account for the blinded tail's final amount_msat.
let mut inflight_htlcs = InFlightHtlcs::new();
let blinded_path = BlindedPath {
- introduction_node_id: ln_test_utils::pubkey(43),
+ introduction_node: IntroductionNode::NodeId(ln_test_utils::pubkey(43)),
blinding_point: ln_test_utils::pubkey(48),
blinded_hops: vec![BlindedHop { blinded_node_id: ln_test_utils::pubkey(49), encrypted_payload: Vec::new() }],
};
maybe_announced_channel: false,
},
RouteHop {
- pubkey: blinded_path.introduction_node_id,
+ pubkey: ln_test_utils::pubkey(43),
node_features: NodeFeatures::empty(),
short_channel_id: 43,
channel_features: ChannelFeatures::empty(),
fn blinded_path_cltv_shadow_offset() {
// Make sure we add a shadow offset when sending to blinded paths.
let blinded_path = BlindedPath {
- introduction_node_id: ln_test_utils::pubkey(43),
+ introduction_node: IntroductionNode::NodeId(ln_test_utils::pubkey(43)),
blinding_point: ln_test_utils::pubkey(44),
blinded_hops: vec![
BlindedHop { blinded_node_id: ln_test_utils::pubkey(45), encrypted_payload: Vec::new() },
maybe_announced_channel: false,
},
RouteHop {
- pubkey: blinded_path.introduction_node_id,
+ pubkey: ln_test_utils::pubkey(43),
node_features: NodeFeatures::empty(),
short_channel_id: 43,
channel_features: ChannelFeatures::empty(),
let random_seed_bytes = keys_manager.get_secure_random_bytes();
let mut blinded_path = BlindedPath {
- introduction_node_id: nodes[2],
+ introduction_node: IntroductionNode::NodeId(nodes[2]),
blinding_point: ln_test_utils::pubkey(42),
blinded_hops: Vec::with_capacity(num_blinded_hops),
};
assert_eq!(tail.final_value_msat, 1001);
let final_hop = route.paths[0].hops.last().unwrap();
- assert_eq!(final_hop.pubkey, blinded_path.introduction_node_id);
+ assert_eq!(
+ NodeId::from_pubkey(&final_hop.pubkey),
+ *blinded_path.public_introduction_node_id(&network_graph).unwrap()
+ );
if tail.hops.len() > 1 {
assert_eq!(final_hop.fee_msat,
blinded_payinfo.fee_base_msat as u64 + blinded_payinfo.fee_proportional_millionths as u64 * tail.final_value_msat / 1000000);
let random_seed_bytes = keys_manager.get_secure_random_bytes();
let mut invalid_blinded_path = BlindedPath {
- introduction_node_id: nodes[2],
+ introduction_node: IntroductionNode::NodeId(nodes[2]),
blinding_point: ln_test_utils::pubkey(42),
blinded_hops: vec![
BlindedHop { blinded_node_id: ln_test_utils::pubkey(43), encrypted_payload: vec![0; 43] },
};
let mut invalid_blinded_path_2 = invalid_blinded_path.clone();
- invalid_blinded_path_2.introduction_node_id = ln_test_utils::pubkey(45);
+ invalid_blinded_path_2.introduction_node = IntroductionNode::NodeId(ln_test_utils::pubkey(45));
let payment_params = PaymentParameters::blinded(vec![
(blinded_payinfo.clone(), invalid_blinded_path.clone()),
(blinded_payinfo.clone(), invalid_blinded_path_2)]);
_ => panic!("Expected error")
}
- invalid_blinded_path.introduction_node_id = our_id;
+ invalid_blinded_path.introduction_node = IntroductionNode::NodeId(our_id);
let payment_params = PaymentParameters::blinded(vec![(blinded_payinfo.clone(), invalid_blinded_path.clone())]);
let route_params = RouteParameters::from_payment_params_and_value(payment_params, 1001);
match get_route(&our_id, &route_params, &network_graph, None, Arc::clone(&logger), &scorer,
_ => panic!("Expected error")
}
- invalid_blinded_path.introduction_node_id = ln_test_utils::pubkey(46);
+ invalid_blinded_path.introduction_node = IntroductionNode::NodeId(ln_test_utils::pubkey(46));
invalid_blinded_path.blinded_hops.clear();
let payment_params = PaymentParameters::blinded(vec![(blinded_payinfo, invalid_blinded_path)]);
let route_params = RouteParameters::from_payment_params_and_value(payment_params, 1001);
let bolt12_features = channelmanager::provided_bolt12_invoice_features(&config);
let blinded_path_1 = BlindedPath {
- introduction_node_id: nodes[2],
+ introduction_node: IntroductionNode::NodeId(nodes[2]),
blinding_point: ln_test_utils::pubkey(42),
blinded_hops: vec![
BlindedHop { blinded_node_id: ln_test_utils::pubkey(42 as u8), encrypted_payload: Vec::new() },
get_channel_details(Some(1), nodes[1], InitFeatures::from_le_bytes(vec![0b11]), 10_000_000)];
let blinded_path = BlindedPath {
- introduction_node_id: nodes[1],
+ introduction_node: IntroductionNode::NodeId(nodes[1]),
blinding_point: ln_test_utils::pubkey(42),
blinded_hops: vec![
BlindedHop { blinded_node_id: ln_test_utils::pubkey(42 as u8), encrypted_payload: Vec::new() },
18446744073709551615)];
let blinded_path = BlindedPath {
- introduction_node_id: nodes[1],
+ introduction_node: IntroductionNode::NodeId(nodes[1]),
blinding_point: ln_test_utils::pubkey(42),
blinded_hops: vec![
BlindedHop { blinded_node_id: ln_test_utils::pubkey(42 as u8), encrypted_payload: Vec::new() },
let amt_msat = 21_7020_5185_1423_0019;
let blinded_path = BlindedPath {
- introduction_node_id: our_id,
+ introduction_node: IntroductionNode::NodeId(our_id),
blinding_point: ln_test_utils::pubkey(42),
blinded_hops: vec![
BlindedHop { blinded_node_id: ln_test_utils::pubkey(42 as u8), encrypted_payload: Vec::new() },
(blinded_payinfo.clone(), blinded_path.clone()),
(blinded_payinfo.clone(), blinded_path.clone()),
];
- blinded_hints[1].1.introduction_node_id = nodes[6];
+ blinded_hints[1].1.introduction_node = IntroductionNode::NodeId(nodes[6]);
let bolt12_features = channelmanager::provided_bolt12_invoice_features(&config);
let payment_params = PaymentParameters::blinded(blinded_hints.clone())
let amt_msat = 21_7020_5185_1423_0019;
let blinded_path = BlindedPath {
- introduction_node_id: our_id,
+ introduction_node: IntroductionNode::NodeId(our_id),
blinding_point: ln_test_utils::pubkey(42),
blinded_hops: vec![
BlindedHop { blinded_node_id: ln_test_utils::pubkey(42 as u8), encrypted_payload: Vec::new() },
blinded_hints[1].0.htlc_minimum_msat = 21_7020_5185_1423_0019;
blinded_hints[1].0.htlc_maximum_msat = 1844_6744_0737_0955_1615;
- blinded_hints[2].1.introduction_node_id = nodes[6];
+ blinded_hints[2].1.introduction_node = IntroductionNode::NodeId(nodes[6]);
let bolt12_features = channelmanager::provided_bolt12_invoice_features(&config);
let payment_params = PaymentParameters::blinded(blinded_hints.clone())
let htlc_min = 2_5165_8240;
let payment_params = if blinded_payee {
let blinded_path = BlindedPath {
- introduction_node_id: nodes[0],
+ introduction_node: IntroductionNode::NodeId(nodes[0]),
blinding_point: ln_test_utils::pubkey(42),
blinded_hops: vec![
BlindedHop { blinded_node_id: ln_test_utils::pubkey(42 as u8), encrypted_payload: Vec::new() },
let htlc_mins = [1_4392, 19_7401, 1027, 6_5535];
let payment_params = if blinded_payee {
let blinded_path = BlindedPath {
- introduction_node_id: nodes[0],
+ introduction_node: IntroductionNode::NodeId(nodes[0]),
blinding_point: ln_test_utils::pubkey(42),
blinded_hops: vec![
BlindedHop { blinded_node_id: ln_test_utils::pubkey(42 as u8), encrypted_payload: Vec::new() },
cltv_expiry_delta: 10,
features: BlindedHopFeatures::empty(),
}, BlindedPath {
- introduction_node_id: nodes[0],
+ introduction_node: IntroductionNode::NodeId(nodes[0]),
blinding_point: ln_test_utils::pubkey(42),
blinded_hops: vec![
BlindedHop { blinded_node_id: ln_test_utils::pubkey(42 as u8), encrypted_payload: Vec::new() },
let htlc_mins = [49_0000, 1125_0000];
let payment_params = {
let blinded_path = BlindedPath {
- introduction_node_id: nodes[0],
+ introduction_node: IntroductionNode::NodeId(nodes[0]),
blinding_point: ln_test_utils::pubkey(42),
blinded_hops: vec![
BlindedHop { blinded_node_id: ln_test_utils::pubkey(42 as u8), encrypted_payload: Vec::new() },
}
}
-#[cfg(all(any(test, ldk_bench), not(feature = "no-std")))]
+#[cfg(all(any(test, ldk_bench), feature = "std"))]
pub(crate) mod bench_utils {
use super::*;
use std::fs::File;
use std::time::Duration;
use bitcoin::hashes::Hash;
- use bitcoin::secp256k1::{PublicKey, Secp256k1, SecretKey};
+ use bitcoin::secp256k1::SecretKey;
use crate::chain::transaction::OutPoint;
use crate::routing::scoring::ScoreUpdate;
- use crate::sign::{EntropySource, KeysManager};
+ use crate::sign::KeysManager;
use crate::ln::ChannelId;
- use crate::ln::channelmanager::{self, ChannelCounterparty, ChannelDetails};
- use crate::ln::features::Bolt11InvoiceFeatures;
- use crate::routing::gossip::NetworkGraph;
+ use crate::ln::channelmanager::{self, ChannelCounterparty};
use crate::util::config::UserConfig;
- use crate::util::ser::ReadableArgs;
use crate::util::test_utils::TestLogger;
/// Tries to open a network graph file, or panics with a URL to fetch it.
config: None,
feerate_sat_per_1000_weight: None,
channel_shutdown_state: Some(channelmanager::ChannelShutdownState::NotShuttingDown),
+ pending_inbound_htlcs: Vec::new(),
+ pending_outbound_htlcs: Vec::new(),
}
}
use crate::prelude::*;
use core::{cmp, fmt};
-use core::convert::TryInto;
use core::ops::{Deref, DerefMut};
use core::time::Duration;
use crate::io::{self, Read};
}
}
-#[cfg(not(c_bindings))]
+#[cfg(any(not(c_bindings), feature = "_test_utils", test))]
impl<'a, T: Score + 'a> LockableScore<'a> for RwLock<T> {
type ScoreUpdate = T;
type ScoreLookUp = T;
base_penalty_amount_multiplier_msat: 8192,
liquidity_penalty_multiplier_msat: 30_000,
liquidity_penalty_amount_multiplier_msat: 192,
- manual_node_penalties: HashMap::new(),
+ manual_node_penalties: new_hash_map(),
anti_probing_penalty_msat: 250,
considered_impossible_penalty_msat: 1_0000_0000_000,
historical_liquidity_penalty_multiplier_msat: 10_000,
/// Clears the list of manual penalties that are applied during path finding.
pub fn clear_manual_penalties(&mut self) {
- self.manual_node_penalties = HashMap::new();
+ self.manual_node_penalties = new_hash_map();
}
}
liquidity_penalty_amount_multiplier_msat: 0,
historical_liquidity_penalty_multiplier_msat: 0,
historical_liquidity_penalty_amount_multiplier_msat: 0,
- manual_node_penalties: HashMap::new(),
+ manual_node_penalties: new_hash_map(),
anti_probing_penalty_msat: 0,
considered_impossible_penalty_msat: 0,
linear_success_probability: true,
decay_params,
network_graph,
logger,
- channel_liquidities: HashMap::new(),
+ channel_liquidities: new_hash_map(),
}
}
_ => return 0,
};
let source = candidate.source();
- if let Some(penalty) = score_params.manual_node_penalties.get(&target) {
+ if let Some(penalty) = score_params.manual_node_penalties.get(target) {
return *penalty;
}
let amount_msat = usage.amount_msat.saturating_add(usage.inflight_htlc_msat);
let capacity_msat = usage.effective_capacity.as_msat();
self.channel_liquidities
- .get(&scid)
+ .get(scid)
.unwrap_or(&ChannelLiquidity::new(Duration::ZERO))
.as_directed(&source, &target, capacity_msat)
.penalty_msat(amount_msat, score_params)
r: &mut R, args: (ProbabilisticScoringDecayParameters, G, L)
) -> Result<Self, DecodeError> {
let (decay_params, network_graph, logger) = args;
- let mut channel_liquidities = HashMap::new();
+ let mut channel_liquidities = new_hash_map();
read_tlv_fields!(r, {
(0, channel_liquidities, required),
});
#[cfg(test)]
mod tests {
use super::{ChannelLiquidity, HistoricalBucketRangeTracker, ProbabilisticScoringFeeParameters, ProbabilisticScoringDecayParameters, ProbabilisticScorer};
- use crate::blinded_path::{BlindedHop, BlindedPath};
+ use crate::blinded_path::{BlindedHop, BlindedPath, IntroductionNode};
use crate::util::config::UserConfig;
use crate::ln::channelmanager;
let mut path = payment_path_for_amount(768);
let recipient_hop = path.hops.pop().unwrap();
let blinded_path = BlindedPath {
- introduction_node_id: path.hops.last().as_ref().unwrap().pubkey,
+ introduction_node: IntroductionNode::NodeId(path.hops.last().as_ref().unwrap().pubkey),
blinding_point: test_utils::pubkey(42),
blinded_hops: vec![
BlindedHop { blinded_node_id: test_utils::pubkey(44), encrypted_payload: Vec::new() }
use crate::routing::gossip::{NetworkGraph, NodeAlias, P2PGossipSync};
use crate::ln::features::{ChannelFeatures, NodeFeatures};
-use crate::ln::msgs::{UnsignedChannelAnnouncement, ChannelAnnouncement, RoutingMessageHandler,
- NodeAnnouncement, UnsignedNodeAnnouncement, ChannelUpdate, UnsignedChannelUpdate, MAX_VALUE_MSAT};
+use crate::ln::msgs::{ChannelAnnouncement, ChannelUpdate, MAX_VALUE_MSAT, NodeAnnouncement, RoutingMessageHandler, SocketAddress, UnsignedChannelAnnouncement, UnsignedChannelUpdate, UnsignedNodeAnnouncement};
use crate::util::test_utils;
use crate::util::ser::Writeable;
use bitcoin::secp256k1::{PublicKey,SecretKey};
use bitcoin::secp256k1::{Secp256k1, All};
+#[allow(unused)]
use crate::prelude::*;
use crate::sync::{self, Arc};
use crate::routing::gossip::NodeId;
// Using the same keys for LN and BTC ids
-pub(super) fn add_channel(
+pub(crate) fn add_channel(
gossip_sync: &P2PGossipSync<Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestChainSource>, Arc<test_utils::TestLogger>>,
secp_ctx: &Secp256k1<All>, node_1_privkey: &SecretKey, node_2_privkey: &SecretKey, features: ChannelFeatures, short_channel_id: u64
) {
};
}
-pub(super) fn add_or_update_node(
+pub(crate) fn add_or_update_node(
gossip_sync: &P2PGossipSync<Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestChainSource>, Arc<test_utils::TestLogger>>,
secp_ctx: &Secp256k1<All>, node_privkey: &SecretKey, features: NodeFeatures, timestamp: u32
) {
node_id,
rgb: [0; 3],
alias: NodeAlias([0; 32]),
- addresses: Vec::new(),
+ addresses: vec![SocketAddress::TcpIpV4 { addr: [127, 0, 0, 1], port: 1000 }],
excess_address_data: Vec::new(),
excess_data: Vec::new(),
};
};
}
-pub(super) fn update_channel(
+pub(crate) fn update_channel(
gossip_sync: &P2PGossipSync<Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestChainSource>, Arc<test_utils::TestLogger>>,
secp_ctx: &Secp256k1<All>, node_privkey: &SecretKey, update: UnsignedChannelUpdate
) {
impl PendingChecks {
pub(super) fn new() -> Self {
PendingChecks { internal: Mutex::new(PendingChecksContext {
- channels: HashMap::new(), nodes: HashMap::new(),
+ channels: new_hash_map(), nodes: new_hash_map(),
}) }
}
use super::*;
use crate::routing::gossip::tests::*;
use crate::util::test_utils::{TestChainSource, TestLogger};
- use crate::ln::msgs;
use bitcoin::secp256k1::{Secp256k1, SecretKey};
use bitcoin::blockdata::transaction::Transaction;
-use bitcoin::secp256k1::{PublicKey, Secp256k1, SecretKey};
-use bitcoin::secp256k1::ecdsa::Signature;
use bitcoin::secp256k1;
+use bitcoin::secp256k1::ecdsa::Signature;
+use bitcoin::secp256k1::{PublicKey, Secp256k1, SecretKey};
-use crate::util::ser::Writeable;
-use crate::ln::PaymentPreimage;
-use crate::ln::chan_utils::{HTLCOutputInCommitment, HolderCommitmentTransaction, CommitmentTransaction, ClosingTransaction};
+use crate::ln::chan_utils::{
+ ClosingTransaction, CommitmentTransaction, HTLCOutputInCommitment, HolderCommitmentTransaction,
+};
use crate::ln::msgs::UnsignedChannelAnnouncement;
+use crate::ln::PaymentPreimage;
+use crate::util::ser::Writeable;
+#[allow(unused_imports)]
use crate::prelude::*;
+
use crate::sign::{ChannelSigner, HTLCDescriptor};
/// A trait to sign Lightning channel transactions as described in
/// irrelevant or duplicate preimages.
//
// TODO: Document the things someone using this interface should enforce before signing.
- fn sign_counterparty_commitment(&self, commitment_tx: &CommitmentTransaction,
- inbound_htlc_preimages: Vec<PaymentPreimage>,
+ fn sign_counterparty_commitment(
+ &self, commitment_tx: &CommitmentTransaction, inbound_htlc_preimages: Vec<PaymentPreimage>,
outbound_htlc_preimages: Vec<PaymentPreimage>, secp_ctx: &Secp256k1<secp256k1::All>,
) -> Result<(Signature, Vec<Signature>), ()>;
/// Creates a signature for a holder's commitment transaction.
/// This may be called multiple times for the same transaction.
///
/// An external signer implementation should check that the commitment has not been revoked.
+ ///
+ /// An `Err` can be returned to signal that the signer is unavailable/cannot produce a valid
+ /// signature and should be retried later. Once the signer is ready to provide a signature after
+ /// previously returning an `Err`, [`ChannelMonitor::signer_unblocked`] must be called on its
+ /// monitor.
+ ///
+ /// [`ChannelMonitor::signer_unblocked`]: crate::chain::channelmonitor::ChannelMonitor::signer_unblocked
//
// TODO: Document the things someone using this interface should enforce before signing.
- fn sign_holder_commitment(&self, commitment_tx: &HolderCommitmentTransaction,
- secp_ctx: &Secp256k1<secp256k1::All>) -> Result<Signature, ()>;
+ fn sign_holder_commitment(
+ &self, commitment_tx: &HolderCommitmentTransaction, secp_ctx: &Secp256k1<secp256k1::All>,
+ ) -> Result<Signature, ()>;
/// Same as [`sign_holder_commitment`], but exists only for tests to get access to holder
/// commitment transactions which will be broadcasted later, after the channel has moved on to a
/// newer state. Thus, needs its own method as [`sign_holder_commitment`] may enforce that we
/// only ever get called once.
- #[cfg(any(test,feature = "unsafe_revoked_tx_signing"))]
- fn unsafe_sign_holder_commitment(&self, commitment_tx: &HolderCommitmentTransaction,
- secp_ctx: &Secp256k1<secp256k1::All>) -> Result<Signature, ()>;
+ #[cfg(any(test, feature = "unsafe_revoked_tx_signing"))]
+ fn unsafe_sign_holder_commitment(
+ &self, commitment_tx: &HolderCommitmentTransaction, secp_ctx: &Secp256k1<secp256k1::All>,
+ ) -> Result<Signature, ()>;
/// Create a signature for the given input in a transaction spending an HTLC transaction output
/// or a commitment transaction `to_local` output when our counterparty broadcasts an old state.
///
/// revoked the state which they eventually broadcast. It's not a _holder_ secret key and does
/// not allow the spending of any funds by itself (you need our holder `revocation_secret` to do
/// so).
- fn sign_justice_revoked_output(&self, justice_tx: &Transaction, input: usize, amount: u64,
- per_commitment_key: &SecretKey, secp_ctx: &Secp256k1<secp256k1::All>
+ ///
+ /// An `Err` can be returned to signal that the signer is unavailable/cannot produce a valid
+ /// signature and should be retried later. Once the signer is ready to provide a signature after
+ /// previously returning an `Err`, [`ChannelMonitor::signer_unblocked`] must be called on its
+ /// monitor.
+ ///
+ /// [`ChannelMonitor::signer_unblocked`]: crate::chain::channelmonitor::ChannelMonitor::signer_unblocked
+ fn sign_justice_revoked_output(
+ &self, justice_tx: &Transaction, input: usize, amount: u64, per_commitment_key: &SecretKey,
+ secp_ctx: &Secp256k1<secp256k1::All>,
) -> Result<Signature, ()>;
/// Create a signature for the given input in a transaction spending a commitment transaction
/// HTLC output when our counterparty broadcasts an old state.
///
/// `htlc` holds HTLC elements (hash, timelock), thus changing the format of the witness script
/// (which is committed to in the BIP 143 signatures).
- fn sign_justice_revoked_htlc(&self, justice_tx: &Transaction, input: usize, amount: u64,
- per_commitment_key: &SecretKey, htlc: &HTLCOutputInCommitment,
- secp_ctx: &Secp256k1<secp256k1::All>) -> Result<Signature, ()>;
+ ///
+ /// An `Err` can be returned to signal that the signer is unavailable/cannot produce a valid
+ /// signature and should be retried later. Once the signer is ready to provide a signature after
+ /// previously returning an `Err`, [`ChannelMonitor::signer_unblocked`] must be called on its
+ /// monitor.
+ ///
+ /// [`ChannelMonitor::signer_unblocked`]: crate::chain::channelmonitor::ChannelMonitor::signer_unblocked
+ fn sign_justice_revoked_htlc(
+ &self, justice_tx: &Transaction, input: usize, amount: u64, per_commitment_key: &SecretKey,
+ htlc: &HTLCOutputInCommitment, secp_ctx: &Secp256k1<secp256k1::All>,
+ ) -> Result<Signature, ()>;
/// Computes the signature for a commitment transaction's HTLC output used as an input within
/// `htlc_tx`, which spends the commitment transaction at index `input`. The signature returned
/// must be be computed using [`EcdsaSighashType::All`].
/// [`ChannelMonitor`] [replica](https://github.com/lightningdevkit/rust-lightning/blob/main/GLOSSARY.md#monitor-replicas)
/// broadcasts it before receiving the update for the latest commitment transaction.
///
+ /// An `Err` can be returned to signal that the signer is unavailable/cannot produce a valid
+ /// signature and should be retried later. Once the signer is ready to provide a signature after
+ /// previously returning an `Err`, [`ChannelMonitor::signer_unblocked`] must be called on its
+ /// monitor.
+ ///
/// [`EcdsaSighashType::All`]: bitcoin::sighash::EcdsaSighashType::All
/// [`ChannelMonitor`]: crate::chain::channelmonitor::ChannelMonitor
- fn sign_holder_htlc_transaction(&self, htlc_tx: &Transaction, input: usize,
- htlc_descriptor: &HTLCDescriptor, secp_ctx: &Secp256k1<secp256k1::All>
+ /// [`ChannelMonitor::signer_unblocked`]: crate::chain::channelmonitor::ChannelMonitor::signer_unblocked
+ fn sign_holder_htlc_transaction(
+ &self, htlc_tx: &Transaction, input: usize, htlc_descriptor: &HTLCDescriptor,
+ secp_ctx: &Secp256k1<secp256k1::All>,
) -> Result<Signature, ()>;
/// Create a signature for a claiming transaction for a HTLC output on a counterparty's commitment
/// transaction, either offered or received.
/// detected onchain. It has been generated by our counterparty and is used to derive
/// channel state keys, which are then included in the witness script and committed to in the
/// BIP 143 signature.
- fn sign_counterparty_htlc_transaction(&self, htlc_tx: &Transaction, input: usize, amount: u64,
- per_commitment_point: &PublicKey, htlc: &HTLCOutputInCommitment,
- secp_ctx: &Secp256k1<secp256k1::All>) -> Result<Signature, ()>;
+ ///
+ /// An `Err` can be returned to signal that the signer is unavailable/cannot produce a valid
+ /// signature and should be retried later. Once the signer is ready to provide a signature after
+ /// previously returning an `Err`, [`ChannelMonitor::signer_unblocked`] must be called on its
+ /// monitor.
+ ///
+ /// [`ChannelMonitor::signer_unblocked`]: crate::chain::channelmonitor::ChannelMonitor::signer_unblocked
+ fn sign_counterparty_htlc_transaction(
+ &self, htlc_tx: &Transaction, input: usize, amount: u64, per_commitment_point: &PublicKey,
+ htlc: &HTLCOutputInCommitment, secp_ctx: &Secp256k1<secp256k1::All>,
+ ) -> Result<Signature, ()>;
/// Create a signature for a (proposed) closing transaction.
///
/// Note that, due to rounding, there may be one "missing" satoshi, and either party may have
/// chosen to forgo their output as dust.
- fn sign_closing_transaction(&self, closing_tx: &ClosingTransaction,
- secp_ctx: &Secp256k1<secp256k1::All>) -> Result<Signature, ()>;
+ fn sign_closing_transaction(
+ &self, closing_tx: &ClosingTransaction, secp_ctx: &Secp256k1<secp256k1::All>,
+ ) -> Result<Signature, ()>;
/// Computes the signature for a commitment transaction's anchor output used as an
/// input within `anchor_tx`, which spends the commitment transaction, at index `input`.
+ ///
+ /// An `Err` can be returned to signal that the signer is unavailable/cannot produce a valid
+ /// signature and should be retried later. Once the signer is ready to provide a signature after
+ /// previously returning an `Err`, [`ChannelMonitor::signer_unblocked`] must be called on its
+ /// monitor.
+ ///
+ /// [`ChannelMonitor::signer_unblocked`]: crate::chain::channelmonitor::ChannelMonitor::signer_unblocked
fn sign_holder_anchor_input(
&self, anchor_tx: &Transaction, input: usize, secp_ctx: &Secp256k1<secp256k1::All>,
) -> Result<Signature, ()>;
///
/// [`NodeSigner::sign_gossip_message`]: crate::sign::NodeSigner::sign_gossip_message
fn sign_channel_announcement_with_funding_key(
- &self, msg: &UnsignedChannelAnnouncement, secp_ctx: &Secp256k1<secp256k1::All>
+ &self, msg: &UnsignedChannelAnnouncement, secp_ctx: &Secp256k1<secp256k1::All>,
) -> Result<Signature, ()>;
}
//! The provided output descriptors follow a custom LDK data format and are currently not fully
//! compatible with Bitcoin Core output descriptors.
+use bitcoin::bip32::{ChildNumber, ExtendedPrivKey, ExtendedPubKey};
use bitcoin::blockdata::locktime::absolute::LockTime;
-use bitcoin::blockdata::transaction::{Transaction, TxOut, TxIn};
-use bitcoin::blockdata::script::{Script, ScriptBuf, Builder};
use bitcoin::blockdata::opcodes;
+use bitcoin::blockdata::script::{Builder, Script, ScriptBuf};
+use bitcoin::blockdata::transaction::{Transaction, TxIn, TxOut};
use bitcoin::ecdsa::Signature as EcdsaSignature;
use bitcoin::network::constants::Network;
use bitcoin::psbt::PartiallySignedTransaction;
-use bitcoin::bip32::{ExtendedPrivKey, ExtendedPubKey, ChildNumber};
use bitcoin::sighash;
use bitcoin::sighash::EcdsaSighashType;
use bitcoin::bech32::u5;
-use bitcoin::hashes::{Hash, HashEngine};
+use bitcoin::hash_types::WPubkeyHash;
use bitcoin::hashes::sha256::Hash as Sha256;
use bitcoin::hashes::sha256d::Hash as Sha256dHash;
-use bitcoin::hash_types::WPubkeyHash;
+use bitcoin::hashes::{Hash, HashEngine};
-#[cfg(taproot)]
-use bitcoin::secp256k1::All;
-use bitcoin::secp256k1::{KeyPair, PublicKey, Scalar, Secp256k1, SecretKey, Signing};
use bitcoin::secp256k1::ecdh::SharedSecret;
use bitcoin::secp256k1::ecdsa::{RecoverableSignature, Signature};
use bitcoin::secp256k1::schnorr;
-use bitcoin::{secp256k1, Sequence, Witness, Txid};
+#[cfg(taproot)]
+use bitcoin::secp256k1::All;
+use bitcoin::secp256k1::{KeyPair, PublicKey, Scalar, Secp256k1, SecretKey, Signing};
+use bitcoin::{secp256k1, Sequence, Txid, Witness};
-use crate::util::transaction_utils;
-use crate::crypto::utils::{hkdf_extract_expand_twice, sign, sign_with_aux_rand};
-use crate::util::ser::{Writeable, Writer, Readable, ReadableArgs};
use crate::chain::transaction::OutPoint;
+use crate::crypto::utils::{hkdf_extract_expand_twice, sign, sign_with_aux_rand};
+use crate::ln::chan_utils::{
+ get_revokeable_redeemscript, make_funding_redeemscript, ChannelPublicKeys,
+ ChannelTransactionParameters, ClosingTransaction, CommitmentTransaction,
+ HTLCOutputInCommitment, HolderCommitmentTransaction,
+};
use crate::ln::channel::ANCHOR_OUTPUT_VALUE_SATOSHI;
-use crate::ln::{chan_utils, PaymentPreimage};
-use crate::ln::chan_utils::{HTLCOutputInCommitment, make_funding_redeemscript, ChannelPublicKeys, HolderCommitmentTransaction, ChannelTransactionParameters, CommitmentTransaction, ClosingTransaction};
-use crate::ln::channel_keys::{DelayedPaymentBasepoint, DelayedPaymentKey, HtlcKey, HtlcBasepoint, RevocationKey, RevocationBasepoint};
-use crate::ln::msgs::{UnsignedChannelAnnouncement, UnsignedGossipMessage};
+use crate::ln::channel_keys::{
+ add_public_key_tweak, DelayedPaymentBasepoint, DelayedPaymentKey, HtlcBasepoint, HtlcKey,
+ RevocationBasepoint, RevocationKey,
+};
#[cfg(taproot)]
use crate::ln::msgs::PartialSignatureWithNonce;
+use crate::ln::msgs::{UnsignedChannelAnnouncement, UnsignedGossipMessage};
use crate::ln::script::ShutdownScript;
+use crate::ln::{chan_utils, PaymentPreimage};
use crate::offers::invoice::UnsignedBolt12Invoice;
use crate::offers::invoice_request::UnsignedInvoiceRequest;
+use crate::util::ser::{Readable, ReadableArgs, Writeable, Writer};
+use crate::util::transaction_utils;
-use crate::prelude::*;
-use core::convert::TryInto;
-use core::ops::Deref;
-use core::sync::atomic::{AtomicUsize, Ordering};
-#[cfg(taproot)]
-use musig2::types::{PartialSignature, PublicNonce};
+use crate::crypto::chacha20::ChaCha20;
use crate::io::{self, Error};
use crate::ln::features::ChannelTypeFeatures;
use crate::ln::msgs::{DecodeError, MAX_VALUE_MSAT};
+use crate::prelude::*;
use crate::sign::ecdsa::{EcdsaChannelSigner, WriteableEcdsaChannelSigner};
#[cfg(taproot)]
use crate::sign::taproot::TaprootChannelSigner;
use crate::util::atomic_counter::AtomicCounter;
-use crate::crypto::chacha20::ChaCha20;
use crate::util::invoice::construct_invoice_preimage;
+use core::convert::TryInto;
+use core::ops::Deref;
+use core::sync::atomic::{AtomicUsize, Ordering};
+#[cfg(taproot)]
+use musig2::types::{PartialSignature, PublicNonce};
pub(crate) mod type_resolver;
pub channel_keys_id: [u8; 32],
/// The value of the channel which this output originated from, possibly indirectly.
pub channel_value_satoshis: u64,
+ /// The channel public keys and other parameters needed to generate a spending transaction or
+ /// to provide to a re-derived signer through [`ChannelSigner::provide_channel_parameters`].
+ ///
+ /// Added as optional, but always `Some` if the descriptor was produced in v0.0.123 or later.
+ pub channel_transaction_parameters: Option<ChannelTransactionParameters>,
}
+
impl DelayedPaymentOutputDescriptor {
/// The maximum length a well-formed witness spending one of these should have.
/// Note: If you have the grind_signatures feature enabled, this will be at least 1 byte
/// shorter.
// Calculated as 1 byte length + 73 byte signature, 1 byte empty vec push, 1 byte length plus
// redeemscript push length.
- pub const MAX_WITNESS_LENGTH: u64 = 1 + 73 + 1 + chan_utils::REVOKEABLE_REDEEMSCRIPT_MAX_LENGTH as u64 + 1;
+ pub const MAX_WITNESS_LENGTH: u64 =
+ 1 + 73 + 1 + chan_utils::REVOKEABLE_REDEEMSCRIPT_MAX_LENGTH as u64 + 1;
}
impl_writeable_tlv_based!(DelayedPaymentOutputDescriptor, {
(8, revocation_pubkey, required),
(10, channel_keys_id, required),
(12, channel_value_satoshis, required),
+ (13, channel_transaction_parameters, option),
});
pub(crate) const P2WPKH_WITNESS_WEIGHT: u64 = 1 /* num stack items */ +
1 /* pubkey length */ +
33 /* pubkey */;
+/// Witness weight for satisying a P2TR key-path spend.
+pub(crate) const P2TR_KEY_PATH_WITNESS_WEIGHT: u64 = 1 /* witness items */
+ + 1 /* schnorr sig len */ + 64 /* schnorr sig */;
+
/// Information about a spendable output to our "payment key".
///
/// See [`SpendableOutputDescriptor::StaticPaymentOutput`] for more details on how to spend this.
/// Added as optional, but always `Some` if the descriptor was produced in v0.0.117 or later.
pub channel_transaction_parameters: Option<ChannelTransactionParameters>,
}
+
impl StaticPaymentOutputDescriptor {
/// Returns the `witness_script` of the spendable output.
///
/// Note that this will only return `Some` for [`StaticPaymentOutputDescriptor`]s that
/// originated from an anchor outputs channel, as they take the form of a P2WSH script.
pub fn witness_script(&self) -> Option<ScriptBuf> {
- self.channel_transaction_parameters.as_ref()
- .and_then(|channel_params|
- if channel_params.channel_type_features.supports_anchors_zero_fee_htlc_tx() {
- let payment_point = channel_params.holder_pubkeys.payment_point;
- Some(chan_utils::get_to_countersignatory_with_anchors_redeemscript(&payment_point))
- } else {
- None
- }
- )
+ self.channel_transaction_parameters.as_ref().and_then(|channel_params| {
+ if channel_params.supports_anchors() {
+ let payment_point = channel_params.holder_pubkeys.payment_point;
+ Some(chan_utils::get_to_countersignatory_with_anchors_redeemscript(&payment_point))
+ } else {
+ None
+ }
+ })
}
/// The maximum length a well-formed witness spending one of these should have.
/// Note: If you have the grind_signatures feature enabled, this will be at least 1 byte
/// shorter.
pub fn max_witness_length(&self) -> u64 {
- if self.channel_transaction_parameters.as_ref()
- .map(|channel_params| channel_params.channel_type_features.supports_anchors_zero_fee_htlc_tx())
- .unwrap_or(false)
- {
+ if self.channel_transaction_parameters.as_ref().map_or(false, |p| p.supports_anchors()) {
let witness_script_weight = 1 /* pubkey push */ + 33 /* pubkey */ +
1 /* OP_CHECKSIGVERIFY */ + 1 /* OP_1 */ + 1 /* OP_CHECKSEQUENCEVERIFY */;
1 /* num witness items */ + 1 /* sig push */ + 73 /* sig including sighash flag */ +
///
/// For channels which were generated prior to LDK 0.0.119, no such argument existed,
/// however this field may still be filled in if such data is available.
- channel_keys_id: Option<[u8; 32]>
+ channel_keys_id: Option<[u8; 32]>,
},
/// An output to a P2WSH script which can be spent with a single signature after an `OP_CSV`
/// delay.
///
/// This is not exported to bindings users as there is no standard serialization for an input.
/// See [`Self::create_spendable_outputs_psbt`] instead.
- pub fn to_psbt_input(&self) -> bitcoin::psbt::Input {
+ ///
+ /// The proprietary field is used to store add tweak for the signing key of this transaction.
+ /// See the [`DelayedPaymentBasepoint::derive_add_tweak`] docs for more info on add tweak and how to use it.
+ ///
+ /// To get the proprietary field use:
+ /// ```
+ /// use bitcoin::psbt::{PartiallySignedTransaction};
+ /// use bitcoin::hashes::hex::FromHex;
+ ///
+ /// # let s = "70736274ff0100520200000001dee978529ab3e61a2987bea5183713d0e6d5ceb5ac81100fdb54a1a2\
+ /// # 69cef505000000000090000000011f26000000000000160014abb3ab63280d4ccc5c11d6b50fd427a8\
+ /// # e19d6470000000000001012b10270000000000002200200afe4736760d814a2651bae63b572d935d9a\
+ /// # b74a1a16c01774e341a32afa763601054d63210394a27a700617f5b7aee72bd4f8076b5770a582b7fb\
+ /// # d1d4ee2ea3802cd3cfbe2067029000b27521034629b1c8fdebfaeb58a74cd181f485e2c462e594cb30\
+ /// # 34dee655875f69f6c7c968ac20fc144c444b5f7370656e6461626c655f6f7574707574006164645f74\
+ /// # 7765616b20a86534f38ad61dc580ef41c3886204adf0911b81619c1ad7a2f5b5de39a2ba600000";
+ /// # let psbt = PartiallySignedTransaction::deserialize(<Vec<u8> as FromHex>::from_hex(s).unwrap().as_slice()).unwrap();
+ /// let key = bitcoin::psbt::raw::ProprietaryKey {
+ /// prefix: "LDK_spendable_output".as_bytes().to_vec(),
+ /// subtype: 0,
+ /// key: "add_tweak".as_bytes().to_vec(),
+ /// };
+ /// let value = psbt
+ /// .inputs
+ /// .first()
+ /// .expect("Unable to get add tweak as there are no inputs")
+ /// .proprietary
+ /// .get(&key)
+ /// .map(|x| x.to_owned());
+ /// ```
+ pub fn to_psbt_input<T: secp256k1::Signing>(
+ &self, secp_ctx: &Secp256k1<T>,
+ ) -> bitcoin::psbt::Input {
match self {
SpendableOutputDescriptor::StaticOutput { output, .. } => {
// Is a standard P2WPKH, no need for witness script
- bitcoin::psbt::Input {
- witness_utxo: Some(output.clone()),
- ..Default::default()
- }
+ bitcoin::psbt::Input { witness_utxo: Some(output.clone()), ..Default::default() }
},
- SpendableOutputDescriptor::DelayedPaymentOutput(descriptor) => {
- // TODO we could add the witness script as well
+ SpendableOutputDescriptor::DelayedPaymentOutput(DelayedPaymentOutputDescriptor {
+ channel_transaction_parameters,
+ per_commitment_point,
+ revocation_pubkey,
+ to_self_delay,
+ output,
+ ..
+ }) => {
+ let delayed_payment_basepoint = channel_transaction_parameters
+ .as_ref()
+ .map(|params| params.holder_pubkeys.delayed_payment_basepoint);
+
+ let (witness_script, add_tweak) =
+ if let Some(basepoint) = delayed_payment_basepoint.as_ref() {
+ // Required to derive signing key: privkey = basepoint_secret + SHA256(per_commitment_point || basepoint)
+ let add_tweak = basepoint.derive_add_tweak(&per_commitment_point);
+ let payment_key = DelayedPaymentKey(add_public_key_tweak(
+ secp_ctx,
+ &basepoint.to_public_key(),
+ &add_tweak,
+ ));
+
+ (
+ Some(get_revokeable_redeemscript(
+ &revocation_pubkey,
+ *to_self_delay,
+ &payment_key,
+ )),
+ Some(add_tweak),
+ )
+ } else {
+ (None, None)
+ };
+
bitcoin::psbt::Input {
- witness_utxo: Some(descriptor.output.clone()),
+ witness_utxo: Some(output.clone()),
+ witness_script,
+ proprietary: add_tweak
+ .map(|add_tweak| {
+ [(
+ bitcoin::psbt::raw::ProprietaryKey {
+ // A non standard namespace for spendable outputs, used to store the tweak needed
+ // to derive the private key
+ prefix: "LDK_spendable_output".as_bytes().to_vec(),
+ subtype: 0,
+ key: "add_tweak".as_bytes().to_vec(),
+ },
+ add_tweak.as_byte_array().to_vec(),
+ )]
+ .into_iter()
+ .collect()
+ })
+ .unwrap_or_default(),
..Default::default()
}
},
- SpendableOutputDescriptor::StaticPaymentOutput(descriptor) => {
- // TODO we could add the witness script as well
- bitcoin::psbt::Input {
- witness_utxo: Some(descriptor.output.clone()),
- ..Default::default()
- }
+ SpendableOutputDescriptor::StaticPaymentOutput(descriptor) => bitcoin::psbt::Input {
+ witness_utxo: Some(descriptor.output.clone()),
+ witness_script: descriptor.witness_script(),
+ ..Default::default()
},
}
}
/// does not match the one we can spend.
///
/// We do not enforce that outputs meet the dust limit or that any output scripts are standard.
- pub fn create_spendable_outputs_psbt(descriptors: &[&SpendableOutputDescriptor], outputs: Vec<TxOut>, change_destination_script: ScriptBuf, feerate_sat_per_1000_weight: u32, locktime: Option<LockTime>) -> Result<(PartiallySignedTransaction, u64), ()> {
+ pub fn create_spendable_outputs_psbt<T: secp256k1::Signing>(
+ secp_ctx: &Secp256k1<T>, descriptors: &[&SpendableOutputDescriptor], outputs: Vec<TxOut>,
+ change_destination_script: ScriptBuf, feerate_sat_per_1000_weight: u32,
+ locktime: Option<LockTime>,
+ ) -> Result<(PartiallySignedTransaction, u64), ()> {
let mut input = Vec::with_capacity(descriptors.len());
let mut input_value = 0;
let mut witness_weight = 0;
- let mut output_set = HashSet::with_capacity(descriptors.len());
+ let mut output_set = hash_set_with_capacity(descriptors.len());
for outp in descriptors {
match outp {
SpendableOutputDescriptor::StaticPaymentOutput(descriptor) => {
- if !output_set.insert(descriptor.outpoint) { return Err(()); }
- let sequence =
- if descriptor.channel_transaction_parameters.as_ref()
- .map(|channel_params| channel_params.channel_type_features.supports_anchors_zero_fee_htlc_tx())
- .unwrap_or(false)
- {
- Sequence::from_consensus(1)
- } else {
- Sequence::ZERO
- };
+ if !output_set.insert(descriptor.outpoint) {
+ return Err(());
+ }
+ let sequence = if descriptor
+ .channel_transaction_parameters
+ .as_ref()
+ .map_or(false, |p| p.supports_anchors())
+ {
+ Sequence::from_consensus(1)
+ } else {
+ Sequence::ZERO
+ };
input.push(TxIn {
previous_output: descriptor.outpoint.into_bitcoin_outpoint(),
script_sig: ScriptBuf::new(),
});
witness_weight += descriptor.max_witness_length();
#[cfg(feature = "grind_signatures")]
- { witness_weight -= 1; } // Guarantees a low R signature
+ {
+ // Guarantees a low R signature
+ witness_weight -= 1;
+ }
input_value += descriptor.output.value;
},
SpendableOutputDescriptor::DelayedPaymentOutput(descriptor) => {
- if !output_set.insert(descriptor.outpoint) { return Err(()); }
+ if !output_set.insert(descriptor.outpoint) {
+ return Err(());
+ }
input.push(TxIn {
previous_output: descriptor.outpoint.into_bitcoin_outpoint(),
script_sig: ScriptBuf::new(),
});
witness_weight += DelayedPaymentOutputDescriptor::MAX_WITNESS_LENGTH;
#[cfg(feature = "grind_signatures")]
- { witness_weight -= 1; } // Guarantees a low R signature
+ {
+ // Guarantees a low R signature
+ witness_weight -= 1;
+ }
input_value += descriptor.output.value;
},
SpendableOutputDescriptor::StaticOutput { ref outpoint, ref output, .. } => {
- if !output_set.insert(*outpoint) { return Err(()); }
+ if !output_set.insert(*outpoint) {
+ return Err(());
+ }
input.push(TxIn {
previous_output: outpoint.into_bitcoin_outpoint(),
script_sig: ScriptBuf::new(),
});
witness_weight += 1 + 73 + 34;
#[cfg(feature = "grind_signatures")]
- { witness_weight -= 1; } // Guarantees a low R signature
+ {
+ // Guarantees a low R signature
+ witness_weight -= 1;
+ }
input_value += output.value;
- }
+ },
+ }
+ if input_value > MAX_VALUE_MSAT / 1000 {
+ return Err(());
}
- if input_value > MAX_VALUE_MSAT / 1000 { return Err(()); }
}
let mut tx = Transaction {
version: 2,
input,
output: outputs,
};
- let expected_max_weight =
- transaction_utils::maybe_add_change_output(&mut tx, input_value, witness_weight, feerate_sat_per_1000_weight, change_destination_script)?;
-
- let psbt_inputs = descriptors.iter().map(|d| d.to_psbt_input()).collect::<Vec<_>>();
+ let expected_max_weight = transaction_utils::maybe_add_change_output(
+ &mut tx,
+ input_value,
+ witness_weight,
+ feerate_sat_per_1000_weight,
+ change_destination_script,
+ )?;
+
+ let psbt_inputs =
+ descriptors.iter().map(|d| d.to_psbt_input(&secp_ctx)).collect::<Vec<_>>();
let psbt = PartiallySignedTransaction {
inputs: psbt_inputs,
outputs: vec![Default::default(); tx.output.len()],
}
impl_writeable_tlv_based!(ChannelDerivationParameters, {
- (0, value_satoshis, required),
- (2, keys_id, required),
- (4, transaction_parameters, required),
+ (0, value_satoshis, required),
+ (2, keys_id, required),
+ (4, transaction_parameters, required),
});
/// A descriptor used to sign for a commitment transaction's HTLC output.
/// taken.
pub preimage: Option<PaymentPreimage>,
/// The counterparty's signature required to spend the HTLC output.
- pub counterparty_sig: Signature
+ pub counterparty_sig: Signature,
}
impl_writeable_tlv_based!(HTLCDescriptor, {
/// Returns the UTXO to be spent by the HTLC input, which can be obtained via
/// [`Self::unsigned_tx_input`].
- pub fn previous_utxo<C: secp256k1::Signing + secp256k1::Verification>(&self, secp: &Secp256k1<C>) -> TxOut {
+ pub fn previous_utxo<C: secp256k1::Signing + secp256k1::Verification>(
+ &self, secp: &Secp256k1<C>,
+ ) -> TxOut {
TxOut {
script_pubkey: self.witness_script(secp).to_v0_p2wsh(),
value: self.htlc.amount_msat / 1000,
/// transaction.
pub fn unsigned_tx_input(&self) -> TxIn {
chan_utils::build_htlc_input(
- &self.commitment_txid, &self.htlc, &self.channel_derivation_parameters.transaction_parameters.channel_type_features
+ &self.commitment_txid,
+ &self.htlc,
+ &self.channel_derivation_parameters.transaction_parameters.channel_type_features,
)
}
/// Returns the delayed output created as a result of spending the HTLC output in the commitment
/// transaction.
- pub fn tx_output<C: secp256k1::Signing + secp256k1::Verification>(&self, secp: &Secp256k1<C>) -> TxOut {
- let channel_params = self.channel_derivation_parameters.transaction_parameters.as_holder_broadcastable();
+ pub fn tx_output<C: secp256k1::Signing + secp256k1::Verification>(
+ &self, secp: &Secp256k1<C>,
+ ) -> TxOut {
+ let channel_params =
+ self.channel_derivation_parameters.transaction_parameters.as_holder_broadcastable();
let broadcaster_keys = channel_params.broadcaster_pubkeys();
let counterparty_keys = channel_params.countersignatory_pubkeys();
let broadcaster_delayed_key = DelayedPaymentKey::from_basepoint(
- secp, &broadcaster_keys.delayed_payment_basepoint, &self.per_commitment_point
+ secp,
+ &broadcaster_keys.delayed_payment_basepoint,
+ &self.per_commitment_point,
+ );
+ let counterparty_revocation_key = &RevocationKey::from_basepoint(
+ &secp,
+ &counterparty_keys.revocation_basepoint,
+ &self.per_commitment_point,
);
- let counterparty_revocation_key = &RevocationKey::from_basepoint(&secp, &counterparty_keys.revocation_basepoint, &self.per_commitment_point);
chan_utils::build_htlc_output(
- self.feerate_per_kw, channel_params.contest_delay(), &self.htlc,
- channel_params.channel_type_features(), &broadcaster_delayed_key, &counterparty_revocation_key
+ self.feerate_per_kw,
+ channel_params.contest_delay(),
+ &self.htlc,
+ channel_params.channel_type_features(),
+ &broadcaster_delayed_key,
+ &counterparty_revocation_key,
)
}
/// Returns the witness script of the HTLC output in the commitment transaction.
- pub fn witness_script<C: secp256k1::Signing + secp256k1::Verification>(&self, secp: &Secp256k1<C>) -> ScriptBuf {
- let channel_params = self.channel_derivation_parameters.transaction_parameters.as_holder_broadcastable();
+ pub fn witness_script<C: secp256k1::Signing + secp256k1::Verification>(
+ &self, secp: &Secp256k1<C>,
+ ) -> ScriptBuf {
+ let channel_params =
+ self.channel_derivation_parameters.transaction_parameters.as_holder_broadcastable();
let broadcaster_keys = channel_params.broadcaster_pubkeys();
let counterparty_keys = channel_params.countersignatory_pubkeys();
let broadcaster_htlc_key = HtlcKey::from_basepoint(
- secp, &broadcaster_keys.htlc_basepoint, &self.per_commitment_point
+ secp,
+ &broadcaster_keys.htlc_basepoint,
+ &self.per_commitment_point,
);
let counterparty_htlc_key = HtlcKey::from_basepoint(
- secp, &counterparty_keys.htlc_basepoint, &self.per_commitment_point,
+ secp,
+ &counterparty_keys.htlc_basepoint,
+ &self.per_commitment_point,
+ );
+ let counterparty_revocation_key = &RevocationKey::from_basepoint(
+ &secp,
+ &counterparty_keys.revocation_basepoint,
+ &self.per_commitment_point,
);
- let counterparty_revocation_key = &RevocationKey::from_basepoint(&secp, &counterparty_keys.revocation_basepoint, &self.per_commitment_point);
chan_utils::get_htlc_redeemscript_with_explicit_keys(
- &self.htlc, channel_params.channel_type_features(), &broadcaster_htlc_key, &counterparty_htlc_key,
+ &self.htlc,
+ channel_params.channel_type_features(),
+ &broadcaster_htlc_key,
+ &counterparty_htlc_key,
&counterparty_revocation_key,
)
}
/// transaction.
pub fn tx_input_witness(&self, signature: &Signature, witness_script: &Script) -> Witness {
chan_utils::build_htlc_input_witness(
- signature, &self.counterparty_sig, &self.preimage, witness_script,
- &self.channel_derivation_parameters.transaction_parameters.channel_type_features
+ signature,
+ &self.counterparty_sig,
+ &self.preimage,
+ witness_script,
+ &self.channel_derivation_parameters.transaction_parameters.channel_type_features,
)
}
/// Derives the channel signer required to sign the HTLC input.
- pub fn derive_channel_signer<S: WriteableEcdsaChannelSigner, SP: Deref>(&self, signer_provider: &SP) -> S
+ pub fn derive_channel_signer<S: WriteableEcdsaChannelSigner, SP: Deref>(
+ &self, signer_provider: &SP,
+ ) -> S
where
- SP::Target: SignerProvider<EcdsaSigner= S>
+ SP::Target: SignerProvider<EcdsaSigner = S>,
{
let mut signer = signer_provider.derive_channel_signer(
self.channel_derivation_parameters.value_satoshis,
self.channel_derivation_parameters.keys_id,
);
- signer.provide_channel_parameters(&self.channel_derivation_parameters.transaction_parameters);
+ signer
+ .provide_channel_parameters(&self.channel_derivation_parameters.transaction_parameters);
signer
}
}
/// Gets the per-commitment point for a specific commitment number
///
/// Note that the commitment number starts at `(1 << 48) - 1` and counts backwards.
- fn get_per_commitment_point(&self, idx: u64, secp_ctx: &Secp256k1<secp256k1::All>) -> PublicKey;
+ fn get_per_commitment_point(&self, idx: u64, secp_ctx: &Secp256k1<secp256k1::All>)
+ -> PublicKey;
/// Gets the commitment secret for a specific commitment number as part of the revocation process
///
///
/// Note that all the relevant preimages will be provided, but there may also be additional
/// irrelevant or duplicate preimages.
- fn validate_holder_commitment(&self, holder_tx: &HolderCommitmentTransaction,
- outbound_htlc_preimages: Vec<PaymentPreimage>) -> Result<(), ()>;
+ fn validate_holder_commitment(
+ &self, holder_tx: &HolderCommitmentTransaction,
+ outbound_htlc_preimages: Vec<PaymentPreimage>,
+ ) -> Result<(), ()>;
/// Validate the counterparty's revocation.
///
/// should be resolved to allow LDK to resume forwarding HTLCs.
///
/// Errors if the [`Recipient`] variant is not supported by the implementation.
- fn ecdh(&self, recipient: Recipient, other_key: &PublicKey, tweak: Option<&Scalar>) -> Result<SharedSecret, ()>;
+ fn ecdh(
+ &self, recipient: Recipient, other_key: &PublicKey, tweak: Option<&Scalar>,
+ ) -> Result<SharedSecret, ()>;
/// Sign an invoice.
///
/// The secret key used to sign the invoice is dependent on the [`Recipient`].
///
/// Errors if the [`Recipient`] variant is not supported by the implementation.
- fn sign_invoice(&self, hrp_bytes: &[u8], invoice_data: &[u5], recipient: Recipient) -> Result<RecoverableSignature, ()>;
+ fn sign_invoice(
+ &self, hrp_bytes: &[u8], invoice_data: &[u5], recipient: Recipient,
+ ) -> Result<RecoverableSignature, ()>;
/// Signs the [`TaggedHash`] of a BOLT 12 invoice request.
///
///
/// [`TaggedHash`]: crate::offers::merkle::TaggedHash
fn sign_bolt12_invoice_request(
- &self, invoice_request: &UnsignedInvoiceRequest
+ &self, invoice_request: &UnsignedInvoiceRequest,
) -> Result<schnorr::Signature, ()>;
/// Signs the [`TaggedHash`] of a BOLT 12 invoice.
///
/// [`TaggedHash`]: crate::offers::merkle::TaggedHash
fn sign_bolt12_invoice(
- &self, invoice: &UnsignedBolt12Invoice
+ &self, invoice: &UnsignedBolt12Invoice,
) -> Result<schnorr::Signature, ()>;
/// Sign a gossip message.
fn sign_gossip_message(&self, msg: UnsignedGossipMessage) -> Result<Signature, ()>;
}
+/// A trait that describes a wallet capable of creating a spending [`Transaction`] from a set of
+/// [`SpendableOutputDescriptor`]s.
+pub trait OutputSpender {
+ /// Creates a [`Transaction`] which spends the given descriptors to the given outputs, plus an
+ /// output to the given change destination (if sufficient change value remains). The
+ /// transaction will have a feerate, at least, of the given value.
+ ///
+ /// The `locktime` argument is used to set the transaction's locktime. If `None`, the
+	/// transaction will have a locktime of 0. It is recommended to set this to the current block
+ /// height to avoid fee sniping, unless you have some specific reason to use a different
+ /// locktime.
+ ///
+ /// Returns `Err(())` if the output value is greater than the input value minus required fee,
+ /// if a descriptor was duplicated, or if an output descriptor `script_pubkey`
+ /// does not match the one we can spend.
+ fn spend_spendable_outputs<C: Signing>(
+ &self, descriptors: &[&SpendableOutputDescriptor], outputs: Vec<TxOut>,
+ change_destination_script: ScriptBuf, feerate_sat_per_1000_weight: u32,
+ locktime: Option<LockTime>, secp_ctx: &Secp256k1<C>,
+ ) -> Result<Transaction, ()>;
+}
+
+// Primarily needed in doctests because of https://github.com/rust-lang/rust/issues/67295
+/// A dynamic [`SignerProvider`] temporarily needed for doc tests.
+#[cfg(taproot)]
+#[doc(hidden)]
+#[deprecated(note = "Remove once taproot cfg is removed")]
+pub type DynSignerProvider =
+ dyn SignerProvider<EcdsaSigner = InMemorySigner, TaprootSigner = InMemorySigner>;
+
+/// A dynamic [`SignerProvider`] temporarily needed for doc tests.
+#[cfg(not(taproot))]
+#[doc(hidden)]
+#[deprecated(note = "Remove once taproot cfg is removed")]
+pub type DynSignerProvider = dyn SignerProvider<EcdsaSigner = InMemorySigner>;
+
/// A trait that can return signer instances for individual channels.
pub trait SignerProvider {
/// A type which implements [`WriteableEcdsaChannelSigner`] which will be returned by [`Self::derive_channel_signer`].
/// `channel_keys_id`.
///
/// This method must return a different value each time it is called.
- fn generate_channel_keys_id(&self, inbound: bool, channel_value_satoshis: u64, user_channel_id: u128) -> [u8; 32];
+ fn generate_channel_keys_id(
+ &self, inbound: bool, channel_value_satoshis: u64, user_channel_id: u128,
+ ) -> [u8; 32];
/// Derives the private key material backing a `Signer`.
///
/// [`SignerProvider::generate_channel_keys_id`]. Otherwise, an existing `Signer` can be
/// re-derived from its `channel_keys_id`, which can be obtained through its trait method
/// [`ChannelSigner::channel_keys_id`].
- fn derive_channel_signer(&self, channel_value_satoshis: u64, channel_keys_id: [u8; 32]) -> Self::EcdsaSigner;
+ fn derive_channel_signer(
+ &self, channel_value_satoshis: u64, channel_keys_id: [u8; 32],
+ ) -> Self::EcdsaSigner;
/// Reads a [`Signer`] for this [`SignerProvider`] from the given input stream.
/// This is only called during deserialization of other objects which contain
fn get_shutdown_scriptpubkey(&self) -> Result<ShutdownScript, ()>;
}
+/// A helper trait that describes an on-chain wallet capable of returning a (change) destination
+/// script.
+pub trait ChangeDestinationSource {
+ /// Returns a script pubkey which can be used as a change destination for
+ /// [`OutputSpender::spend_spendable_outputs`].
+ ///
+ /// This method should return a different value each time it is called, to avoid linking
+	/// on-chain funds controlled by the same user.
+ fn get_change_destination_script(&self) -> Result<ScriptBuf, ()>;
+}
+
/// A simple implementation of [`WriteableEcdsaChannelSigner`] that just keeps the private keys in memory.
///
/// This implementation performs no policy checks and is insufficient by itself as
channel_value_satoshis: u64,
/// Key derivation parameters.
channel_keys_id: [u8; 32],
- /// Seed from which all randomness produced is derived from.
- rand_bytes_unique_start: [u8; 32],
- /// Tracks the number of times we've produced randomness to ensure we don't return the same
- /// bytes twice.
- rand_bytes_index: AtomicCounter,
+ /// A source of random bytes.
+ entropy_source: RandomBytes,
}
impl PartialEq for InMemorySigner {
fn eq(&self, other: &Self) -> bool {
- self.funding_key == other.funding_key &&
- self.revocation_base_key == other.revocation_base_key &&
- self.payment_key == other.payment_key &&
- self.delayed_payment_base_key == other.delayed_payment_base_key &&
- self.htlc_base_key == other.htlc_base_key &&
- self.commitment_seed == other.commitment_seed &&
- self.holder_channel_pubkeys == other.holder_channel_pubkeys &&
- self.channel_parameters == other.channel_parameters &&
- self.channel_value_satoshis == other.channel_value_satoshis &&
- self.channel_keys_id == other.channel_keys_id
+ self.funding_key == other.funding_key
+ && self.revocation_base_key == other.revocation_base_key
+ && self.payment_key == other.payment_key
+ && self.delayed_payment_base_key == other.delayed_payment_base_key
+ && self.htlc_base_key == other.htlc_base_key
+ && self.commitment_seed == other.commitment_seed
+ && self.holder_channel_pubkeys == other.holder_channel_pubkeys
+ && self.channel_parameters == other.channel_parameters
+ && self.channel_value_satoshis == other.channel_value_satoshis
+ && self.channel_keys_id == other.channel_keys_id
}
}
channel_parameters: self.channel_parameters.clone(),
channel_value_satoshis: self.channel_value_satoshis,
channel_keys_id: self.channel_keys_id,
- rand_bytes_unique_start: self.get_secure_random_bytes(),
- rand_bytes_index: AtomicCounter::new(),
+ entropy_source: RandomBytes::new(self.get_secure_random_bytes()),
}
}
}
impl InMemorySigner {
/// Creates a new [`InMemorySigner`].
pub fn new<C: Signing>(
- secp_ctx: &Secp256k1<C>,
- funding_key: SecretKey,
- revocation_base_key: SecretKey,
- payment_key: SecretKey,
- delayed_payment_base_key: SecretKey,
- htlc_base_key: SecretKey,
- commitment_seed: [u8; 32],
- channel_value_satoshis: u64,
- channel_keys_id: [u8; 32],
+ secp_ctx: &Secp256k1<C>, funding_key: SecretKey, revocation_base_key: SecretKey,
+ payment_key: SecretKey, delayed_payment_base_key: SecretKey, htlc_base_key: SecretKey,
+ commitment_seed: [u8; 32], channel_value_satoshis: u64, channel_keys_id: [u8; 32],
rand_bytes_unique_start: [u8; 32],
) -> InMemorySigner {
- let holder_channel_pubkeys =
- InMemorySigner::make_holder_keys(secp_ctx, &funding_key, &revocation_base_key,
- &payment_key, &delayed_payment_base_key,
- &htlc_base_key);
+ let holder_channel_pubkeys = InMemorySigner::make_holder_keys(
+ secp_ctx,
+ &funding_key,
+ &revocation_base_key,
+ &payment_key,
+ &delayed_payment_base_key,
+ &htlc_base_key,
+ );
InMemorySigner {
funding_key,
revocation_base_key,
holder_channel_pubkeys,
channel_parameters: None,
channel_keys_id,
- rand_bytes_unique_start,
- rand_bytes_index: AtomicCounter::new(),
+ entropy_source: RandomBytes::new(rand_bytes_unique_start),
}
}
- fn make_holder_keys<C: Signing>(secp_ctx: &Secp256k1<C>,
- funding_key: &SecretKey,
- revocation_base_key: &SecretKey,
- payment_key: &SecretKey,
- delayed_payment_base_key: &SecretKey,
- htlc_base_key: &SecretKey) -> ChannelPublicKeys {
+ fn make_holder_keys<C: Signing>(
+ secp_ctx: &Secp256k1<C>, funding_key: &SecretKey, revocation_base_key: &SecretKey,
+ payment_key: &SecretKey, delayed_payment_base_key: &SecretKey, htlc_base_key: &SecretKey,
+ ) -> ChannelPublicKeys {
let from_secret = |s: &SecretKey| PublicKey::from_secret_key(secp_ctx, s);
ChannelPublicKeys {
funding_pubkey: from_secret(&funding_key),
revocation_basepoint: RevocationBasepoint::from(from_secret(&revocation_base_key)),
payment_point: from_secret(&payment_key),
- delayed_payment_basepoint: DelayedPaymentBasepoint::from(from_secret(&delayed_payment_base_key)),
+ delayed_payment_basepoint: DelayedPaymentBasepoint::from(from_secret(
+ &delayed_payment_base_key,
+ )),
htlc_basepoint: HtlcBasepoint::from(from_secret(&htlc_base_key)),
}
}
/// Will return `None` if [`ChannelSigner::provide_channel_parameters`] has not been called.
/// In general, this is safe to `unwrap` only in [`ChannelSigner`] implementation.
pub fn counterparty_pubkeys(&self) -> Option<&ChannelPublicKeys> {
- self.get_channel_parameters()
- .and_then(|params| params.counterparty_parameters.as_ref().map(|params| ¶ms.pubkeys))
+ self.get_channel_parameters().and_then(|params| {
+ params.counterparty_parameters.as_ref().map(|params| ¶ms.pubkeys)
+ })
}
/// Returns the `contest_delay` value specified by our counterparty and applied on holder-broadcastable
/// Will return `None` if [`ChannelSigner::provide_channel_parameters`] has not been called.
/// In general, this is safe to `unwrap` only in [`ChannelSigner`] implementation.
pub fn counterparty_selected_contest_delay(&self) -> Option<u16> {
- self.get_channel_parameters()
- .and_then(|params| params.counterparty_parameters.as_ref().map(|params| params.selected_contest_delay))
+ self.get_channel_parameters().and_then(|params| {
+ params.counterparty_parameters.as_ref().map(|params| params.selected_contest_delay)
+ })
}
/// Returns the `contest_delay` value specified by us and applied on transactions broadcastable
/// or if an output descriptor `script_pubkey` does not match the one we can spend.
///
/// [`descriptor.outpoint`]: StaticPaymentOutputDescriptor::outpoint
- pub fn sign_counterparty_payment_input<C: Signing>(&self, spend_tx: &Transaction, input_idx: usize, descriptor: &StaticPaymentOutputDescriptor, secp_ctx: &Secp256k1<C>) -> Result<Witness, ()> {
+ pub fn sign_counterparty_payment_input<C: Signing>(
+ &self, spend_tx: &Transaction, input_idx: usize,
+ descriptor: &StaticPaymentOutputDescriptor, secp_ctx: &Secp256k1<C>,
+ ) -> Result<Witness, ()> {
// TODO: We really should be taking the SigHashCache as a parameter here instead of
// spend_tx, but ideally the SigHashCache would expose the transaction's inputs read-only
// so that we can check them. This requires upstream rust-bitcoin changes (as well as
// bindings updates to support SigHashCache objects).
- if spend_tx.input.len() <= input_idx { return Err(()); }
- if !spend_tx.input[input_idx].script_sig.is_empty() { return Err(()); }
- if spend_tx.input[input_idx].previous_output != descriptor.outpoint.into_bitcoin_outpoint() { return Err(()); }
+ if spend_tx.input.len() <= input_idx {
+ return Err(());
+ }
+ if !spend_tx.input[input_idx].script_sig.is_empty() {
+ return Err(());
+ }
+ if spend_tx.input[input_idx].previous_output != descriptor.outpoint.into_bitcoin_outpoint()
+ {
+ return Err(());
+ }
let remotepubkey = bitcoin::PublicKey::new(self.pubkeys().payment_point);
// We cannot always assume that `channel_parameters` is set, so can't just call
// `self.channel_parameters()` or anything that relies on it
- let supports_anchors_zero_fee_htlc_tx = self.channel_type_features()
+ let supports_anchors_zero_fee_htlc_tx = self
+ .channel_type_features()
.map(|features| features.supports_anchors_zero_fee_htlc_tx())
.unwrap_or(false);
} else {
ScriptBuf::new_p2pkh(&remotepubkey.pubkey_hash())
};
- let sighash = hash_to_message!(&sighash::SighashCache::new(spend_tx).segwit_signature_hash(input_idx, &witness_script, descriptor.output.value, EcdsaSighashType::All).unwrap()[..]);
+ let sighash = hash_to_message!(
+ &sighash::SighashCache::new(spend_tx)
+ .segwit_signature_hash(
+ input_idx,
+ &witness_script,
+ descriptor.output.value,
+ EcdsaSighashType::All
+ )
+ .unwrap()[..]
+ );
let remotesig = sign_with_aux_rand(secp_ctx, &sighash, &self.payment_key, &self);
let payment_script = if supports_anchors_zero_fee_htlc_tx {
witness_script.to_v0_p2wsh()
ScriptBuf::new_v0_p2wpkh(&remotepubkey.wpubkey_hash().unwrap())
};
- if payment_script != descriptor.output.script_pubkey { return Err(()); }
+ if payment_script != descriptor.output.script_pubkey {
+ return Err(());
+ }
let mut witness = Vec::with_capacity(2);
witness.push(remotesig.serialize_der().to_vec());
///
/// [`descriptor.outpoint`]: DelayedPaymentOutputDescriptor::outpoint
/// [`descriptor.to_self_delay`]: DelayedPaymentOutputDescriptor::to_self_delay
- pub fn sign_dynamic_p2wsh_input<C: Signing>(&self, spend_tx: &Transaction, input_idx: usize, descriptor: &DelayedPaymentOutputDescriptor, secp_ctx: &Secp256k1<C>) -> Result<Witness, ()> {
+ pub fn sign_dynamic_p2wsh_input<C: Signing>(
+ &self, spend_tx: &Transaction, input_idx: usize,
+ descriptor: &DelayedPaymentOutputDescriptor, secp_ctx: &Secp256k1<C>,
+ ) -> Result<Witness, ()> {
// TODO: We really should be taking the SigHashCache as a parameter here instead of
// spend_tx, but ideally the SigHashCache would expose the transaction's inputs read-only
// so that we can check them. This requires upstream rust-bitcoin changes (as well as
// bindings updates to support SigHashCache objects).
- if spend_tx.input.len() <= input_idx { return Err(()); }
- if !spend_tx.input[input_idx].script_sig.is_empty() { return Err(()); }
- if spend_tx.input[input_idx].previous_output != descriptor.outpoint.into_bitcoin_outpoint() { return Err(()); }
- if spend_tx.input[input_idx].sequence.0 != descriptor.to_self_delay as u32 { return Err(()); }
-
- let delayed_payment_key = chan_utils::derive_private_key(&secp_ctx, &descriptor.per_commitment_point, &self.delayed_payment_base_key);
- let delayed_payment_pubkey = DelayedPaymentKey::from_secret_key(&secp_ctx, &delayed_payment_key);
- let witness_script = chan_utils::get_revokeable_redeemscript(&descriptor.revocation_pubkey, descriptor.to_self_delay, &delayed_payment_pubkey);
- let sighash = hash_to_message!(&sighash::SighashCache::new(spend_tx).segwit_signature_hash(input_idx, &witness_script, descriptor.output.value, EcdsaSighashType::All).unwrap()[..]);
+ if spend_tx.input.len() <= input_idx {
+ return Err(());
+ }
+ if !spend_tx.input[input_idx].script_sig.is_empty() {
+ return Err(());
+ }
+ if spend_tx.input[input_idx].previous_output != descriptor.outpoint.into_bitcoin_outpoint()
+ {
+ return Err(());
+ }
+ if spend_tx.input[input_idx].sequence.0 != descriptor.to_self_delay as u32 {
+ return Err(());
+ }
+
+ let delayed_payment_key = chan_utils::derive_private_key(
+ &secp_ctx,
+ &descriptor.per_commitment_point,
+ &self.delayed_payment_base_key,
+ );
+ let delayed_payment_pubkey =
+ DelayedPaymentKey::from_secret_key(&secp_ctx, &delayed_payment_key);
+ let witness_script = chan_utils::get_revokeable_redeemscript(
+ &descriptor.revocation_pubkey,
+ descriptor.to_self_delay,
+ &delayed_payment_pubkey,
+ );
+ let sighash = hash_to_message!(
+ &sighash::SighashCache::new(spend_tx)
+ .segwit_signature_hash(
+ input_idx,
+ &witness_script,
+ descriptor.output.value,
+ EcdsaSighashType::All
+ )
+ .unwrap()[..]
+ );
let local_delayedsig = EcdsaSignature {
sig: sign_with_aux_rand(secp_ctx, &sighash, &delayed_payment_key, &self),
hash_ty: EcdsaSighashType::All,
};
- let payment_script = bitcoin::Address::p2wsh(&witness_script, Network::Bitcoin).script_pubkey();
+ let payment_script =
+ bitcoin::Address::p2wsh(&witness_script, Network::Bitcoin).script_pubkey();
- if descriptor.output.script_pubkey != payment_script { return Err(()); }
+ if descriptor.output.script_pubkey != payment_script {
+ return Err(());
+ }
Ok(Witness::from_slice(&[
&local_delayedsig.serialize()[..],
impl EntropySource for InMemorySigner {
fn get_secure_random_bytes(&self) -> [u8; 32] {
- let index = self.rand_bytes_index.get_increment();
- let mut nonce = [0u8; 16];
- nonce[..8].copy_from_slice(&index.to_be_bytes());
- ChaCha20::get_single_block(&self.rand_bytes_unique_start, &nonce)
+ self.entropy_source.get_secure_random_bytes()
}
}
impl ChannelSigner for InMemorySigner {
- fn get_per_commitment_point(&self, idx: u64, secp_ctx: &Secp256k1<secp256k1::All>) -> PublicKey {
- let commitment_secret = SecretKey::from_slice(&chan_utils::build_commitment_secret(&self.commitment_seed, idx)).unwrap();
+ fn get_per_commitment_point(
+ &self, idx: u64, secp_ctx: &Secp256k1<secp256k1::All>,
+ ) -> PublicKey {
+ let commitment_secret =
+ SecretKey::from_slice(&chan_utils::build_commitment_secret(&self.commitment_seed, idx))
+ .unwrap();
PublicKey::from_secret_key(secp_ctx, &commitment_secret)
}
chan_utils::build_commitment_secret(&self.commitment_seed, idx)
}
- fn validate_holder_commitment(&self, _holder_tx: &HolderCommitmentTransaction, _outbound_htlc_preimages: Vec<PaymentPreimage>) -> Result<(), ()> {
+ fn validate_holder_commitment(
+ &self, _holder_tx: &HolderCommitmentTransaction,
+ _outbound_htlc_preimages: Vec<PaymentPreimage>,
+ ) -> Result<(), ()> {
Ok(())
}
Ok(())
}
- fn pubkeys(&self) -> &ChannelPublicKeys { &self.holder_channel_pubkeys }
+ fn pubkeys(&self) -> &ChannelPublicKeys {
+ &self.holder_channel_pubkeys
+ }
- fn channel_keys_id(&self) -> [u8; 32] { self.channel_keys_id }
+ fn channel_keys_id(&self) -> [u8; 32] {
+ self.channel_keys_id
+ }
fn provide_channel_parameters(&mut self, channel_parameters: &ChannelTransactionParameters) {
- assert!(self.channel_parameters.is_none() || self.channel_parameters.as_ref().unwrap() == channel_parameters);
+ assert!(
+ self.channel_parameters.is_none()
+ || self.channel_parameters.as_ref().unwrap() == channel_parameters
+ );
if self.channel_parameters.is_some() {
// The channel parameters were already set and they match, return early.
return;
}
}
-const MISSING_PARAMS_ERR: &'static str = "ChannelSigner::provide_channel_parameters must be called before signing operations";
+const MISSING_PARAMS_ERR: &'static str =
+ "ChannelSigner::provide_channel_parameters must be called before signing operations";
impl EcdsaChannelSigner for InMemorySigner {
- fn sign_counterparty_commitment(&self, commitment_tx: &CommitmentTransaction, _inbound_htlc_preimages: Vec<PaymentPreimage>, _outbound_htlc_preimages: Vec<PaymentPreimage>, secp_ctx: &Secp256k1<secp256k1::All>) -> Result<(Signature, Vec<Signature>), ()> {
+ fn sign_counterparty_commitment(
+ &self, commitment_tx: &CommitmentTransaction,
+ _inbound_htlc_preimages: Vec<PaymentPreimage>,
+ _outbound_htlc_preimages: Vec<PaymentPreimage>, secp_ctx: &Secp256k1<secp256k1::All>,
+ ) -> Result<(Signature, Vec<Signature>), ()> {
let trusted_tx = commitment_tx.trust();
let keys = trusted_tx.keys();
let funding_pubkey = PublicKey::from_secret_key(secp_ctx, &self.funding_key);
let counterparty_keys = self.counterparty_pubkeys().expect(MISSING_PARAMS_ERR);
- let channel_funding_redeemscript = make_funding_redeemscript(&funding_pubkey, &counterparty_keys.funding_pubkey);
+ let channel_funding_redeemscript =
+ make_funding_redeemscript(&funding_pubkey, &counterparty_keys.funding_pubkey);
let built_tx = trusted_tx.built_transaction();
- let commitment_sig = built_tx.sign_counterparty_commitment(&self.funding_key, &channel_funding_redeemscript, self.channel_value_satoshis, secp_ctx);
+ let commitment_sig = built_tx.sign_counterparty_commitment(
+ &self.funding_key,
+ &channel_funding_redeemscript,
+ self.channel_value_satoshis,
+ secp_ctx,
+ );
let commitment_txid = built_tx.txid;
let mut htlc_sigs = Vec::with_capacity(commitment_tx.htlcs().len());
let holder_selected_contest_delay =
self.holder_selected_contest_delay().expect(MISSING_PARAMS_ERR);
let chan_type = &channel_parameters.channel_type_features;
- let htlc_tx = chan_utils::build_htlc_transaction(&commitment_txid, commitment_tx.feerate_per_kw(), holder_selected_contest_delay, htlc, chan_type, &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
+ let htlc_tx = chan_utils::build_htlc_transaction(
+ &commitment_txid,
+ commitment_tx.feerate_per_kw(),
+ holder_selected_contest_delay,
+ htlc,
+ chan_type,
+ &keys.broadcaster_delayed_payment_key,
+ &keys.revocation_key,
+ );
let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, chan_type, &keys);
- let htlc_sighashtype = if chan_type.supports_anchors_zero_fee_htlc_tx() { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
- let htlc_sighash = hash_to_message!(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]);
- let holder_htlc_key = chan_utils::derive_private_key(&secp_ctx, &keys.per_commitment_point, &self.htlc_base_key);
+ let htlc_sighashtype = if chan_type.supports_anchors_zero_fee_htlc_tx() {
+ EcdsaSighashType::SinglePlusAnyoneCanPay
+ } else {
+ EcdsaSighashType::All
+ };
+ let htlc_sighash = hash_to_message!(
+ &sighash::SighashCache::new(&htlc_tx)
+ .segwit_signature_hash(
+ 0,
+ &htlc_redeemscript,
+ htlc.amount_msat / 1000,
+ htlc_sighashtype
+ )
+ .unwrap()[..]
+ );
+ let holder_htlc_key = chan_utils::derive_private_key(
+ &secp_ctx,
+ &keys.per_commitment_point,
+ &self.htlc_base_key,
+ );
htlc_sigs.push(sign(secp_ctx, &htlc_sighash, &holder_htlc_key));
}
Ok((commitment_sig, htlc_sigs))
}
- fn sign_holder_commitment(&self, commitment_tx: &HolderCommitmentTransaction, secp_ctx: &Secp256k1<secp256k1::All>) -> Result<Signature, ()> {
+ fn sign_holder_commitment(
+ &self, commitment_tx: &HolderCommitmentTransaction, secp_ctx: &Secp256k1<secp256k1::All>,
+ ) -> Result<Signature, ()> {
let funding_pubkey = PublicKey::from_secret_key(secp_ctx, &self.funding_key);
let counterparty_keys = self.counterparty_pubkeys().expect(MISSING_PARAMS_ERR);
- let funding_redeemscript = make_funding_redeemscript(&funding_pubkey, &counterparty_keys.funding_pubkey);
+ let funding_redeemscript =
+ make_funding_redeemscript(&funding_pubkey, &counterparty_keys.funding_pubkey);
let trusted_tx = commitment_tx.trust();
- Ok(trusted_tx.built_transaction().sign_holder_commitment(&self.funding_key, &funding_redeemscript, self.channel_value_satoshis, &self, secp_ctx))
- }
-
- #[cfg(any(test,feature = "unsafe_revoked_tx_signing"))]
- fn unsafe_sign_holder_commitment(&self, commitment_tx: &HolderCommitmentTransaction, secp_ctx: &Secp256k1<secp256k1::All>) -> Result<Signature, ()> {
+ Ok(trusted_tx.built_transaction().sign_holder_commitment(
+ &self.funding_key,
+ &funding_redeemscript,
+ self.channel_value_satoshis,
+ &self,
+ secp_ctx,
+ ))
+ }
+
+ #[cfg(any(test, feature = "unsafe_revoked_tx_signing"))]
+ fn unsafe_sign_holder_commitment(
+ &self, commitment_tx: &HolderCommitmentTransaction, secp_ctx: &Secp256k1<secp256k1::All>,
+ ) -> Result<Signature, ()> {
let funding_pubkey = PublicKey::from_secret_key(secp_ctx, &self.funding_key);
let counterparty_keys = self.counterparty_pubkeys().expect(MISSING_PARAMS_ERR);
- let funding_redeemscript = make_funding_redeemscript(&funding_pubkey, &counterparty_keys.funding_pubkey);
+ let funding_redeemscript =
+ make_funding_redeemscript(&funding_pubkey, &counterparty_keys.funding_pubkey);
let trusted_tx = commitment_tx.trust();
- Ok(trusted_tx.built_transaction().sign_holder_commitment(&self.funding_key, &funding_redeemscript, self.channel_value_satoshis, &self, secp_ctx))
- }
-
- fn sign_justice_revoked_output(&self, justice_tx: &Transaction, input: usize, amount: u64, per_commitment_key: &SecretKey, secp_ctx: &Secp256k1<secp256k1::All>) -> Result<Signature, ()> {
- let revocation_key = chan_utils::derive_private_revocation_key(&secp_ctx, &per_commitment_key, &self.revocation_base_key);
+ Ok(trusted_tx.built_transaction().sign_holder_commitment(
+ &self.funding_key,
+ &funding_redeemscript,
+ self.channel_value_satoshis,
+ &self,
+ secp_ctx,
+ ))
+ }
+
+ fn sign_justice_revoked_output(
+ &self, justice_tx: &Transaction, input: usize, amount: u64, per_commitment_key: &SecretKey,
+ secp_ctx: &Secp256k1<secp256k1::All>,
+ ) -> Result<Signature, ()> {
+ let revocation_key = chan_utils::derive_private_revocation_key(
+ &secp_ctx,
+ &per_commitment_key,
+ &self.revocation_base_key,
+ );
let per_commitment_point = PublicKey::from_secret_key(secp_ctx, &per_commitment_key);
let revocation_pubkey = RevocationKey::from_basepoint(
- &secp_ctx, &self.pubkeys().revocation_basepoint, &per_commitment_point,
+ &secp_ctx,
+ &self.pubkeys().revocation_basepoint,
+ &per_commitment_point,
);
let witness_script = {
let counterparty_keys = self.counterparty_pubkeys().expect(MISSING_PARAMS_ERR);
let holder_selected_contest_delay =
self.holder_selected_contest_delay().expect(MISSING_PARAMS_ERR);
- let counterparty_delayedpubkey = DelayedPaymentKey::from_basepoint(&secp_ctx, &counterparty_keys.delayed_payment_basepoint, &per_commitment_point);
- chan_utils::get_revokeable_redeemscript(&revocation_pubkey, holder_selected_contest_delay, &counterparty_delayedpubkey)
+ let counterparty_delayedpubkey = DelayedPaymentKey::from_basepoint(
+ &secp_ctx,
+ &counterparty_keys.delayed_payment_basepoint,
+ &per_commitment_point,
+ );
+ chan_utils::get_revokeable_redeemscript(
+ &revocation_pubkey,
+ holder_selected_contest_delay,
+ &counterparty_delayedpubkey,
+ )
};
let mut sighash_parts = sighash::SighashCache::new(justice_tx);
- let sighash = hash_to_message!(&sighash_parts.segwit_signature_hash(input, &witness_script, amount, EcdsaSighashType::All).unwrap()[..]);
- return Ok(sign_with_aux_rand(secp_ctx, &sighash, &revocation_key, &self))
+ let sighash = hash_to_message!(
+ &sighash_parts
+ .segwit_signature_hash(input, &witness_script, amount, EcdsaSighashType::All)
+ .unwrap()[..]
+ );
+ return Ok(sign_with_aux_rand(secp_ctx, &sighash, &revocation_key, &self));
}
- fn sign_justice_revoked_htlc(&self, justice_tx: &Transaction, input: usize, amount: u64, per_commitment_key: &SecretKey, htlc: &HTLCOutputInCommitment, secp_ctx: &Secp256k1<secp256k1::All>) -> Result<Signature, ()> {
- let revocation_key = chan_utils::derive_private_revocation_key(&secp_ctx, &per_commitment_key, &self.revocation_base_key);
+ fn sign_justice_revoked_htlc(
+ &self, justice_tx: &Transaction, input: usize, amount: u64, per_commitment_key: &SecretKey,
+ htlc: &HTLCOutputInCommitment, secp_ctx: &Secp256k1<secp256k1::All>,
+ ) -> Result<Signature, ()> {
+ let revocation_key = chan_utils::derive_private_revocation_key(
+ &secp_ctx,
+ &per_commitment_key,
+ &self.revocation_base_key,
+ );
let per_commitment_point = PublicKey::from_secret_key(secp_ctx, &per_commitment_key);
let revocation_pubkey = RevocationKey::from_basepoint(
- &secp_ctx, &self.pubkeys().revocation_basepoint, &per_commitment_point,
+ &secp_ctx,
+ &self.pubkeys().revocation_basepoint,
+ &per_commitment_point,
);
let witness_script = {
let counterparty_keys = self.counterparty_pubkeys().expect(MISSING_PARAMS_ERR);
let counterparty_htlcpubkey = HtlcKey::from_basepoint(
- &secp_ctx, &counterparty_keys.htlc_basepoint, &per_commitment_point,
+ &secp_ctx,
+ &counterparty_keys.htlc_basepoint,
+ &per_commitment_point,
);
let holder_htlcpubkey = HtlcKey::from_basepoint(
- &secp_ctx, &self.pubkeys().htlc_basepoint, &per_commitment_point,
+ &secp_ctx,
+ &self.pubkeys().htlc_basepoint,
+ &per_commitment_point,
);
let chan_type = self.channel_type_features().expect(MISSING_PARAMS_ERR);
- chan_utils::get_htlc_redeemscript_with_explicit_keys(&htlc, chan_type, &counterparty_htlcpubkey, &holder_htlcpubkey, &revocation_pubkey)
+ chan_utils::get_htlc_redeemscript_with_explicit_keys(
+ &htlc,
+ chan_type,
+ &counterparty_htlcpubkey,
+ &holder_htlcpubkey,
+ &revocation_pubkey,
+ )
};
let mut sighash_parts = sighash::SighashCache::new(justice_tx);
- let sighash = hash_to_message!(&sighash_parts.segwit_signature_hash(input, &witness_script, amount, EcdsaSighashType::All).unwrap()[..]);
- return Ok(sign_with_aux_rand(secp_ctx, &sighash, &revocation_key, &self))
+ let sighash = hash_to_message!(
+ &sighash_parts
+ .segwit_signature_hash(input, &witness_script, amount, EcdsaSighashType::All)
+ .unwrap()[..]
+ );
+ return Ok(sign_with_aux_rand(secp_ctx, &sighash, &revocation_key, &self));
}
fn sign_holder_htlc_transaction(
&self, htlc_tx: &Transaction, input: usize, htlc_descriptor: &HTLCDescriptor,
- secp_ctx: &Secp256k1<secp256k1::All>
+ secp_ctx: &Secp256k1<secp256k1::All>,
) -> Result<Signature, ()> {
let witness_script = htlc_descriptor.witness_script(secp_ctx);
- let sighash = &sighash::SighashCache::new(&*htlc_tx).segwit_signature_hash(
- input, &witness_script, htlc_descriptor.htlc.amount_msat / 1000, EcdsaSighashType::All
- ).map_err(|_| ())?;
+ let sighash = &sighash::SighashCache::new(&*htlc_tx)
+ .segwit_signature_hash(
+ input,
+ &witness_script,
+ htlc_descriptor.htlc.amount_msat / 1000,
+ EcdsaSighashType::All,
+ )
+ .map_err(|_| ())?;
let our_htlc_private_key = chan_utils::derive_private_key(
- &secp_ctx, &htlc_descriptor.per_commitment_point, &self.htlc_base_key
+ &secp_ctx,
+ &htlc_descriptor.per_commitment_point,
+ &self.htlc_base_key,
);
- Ok(sign_with_aux_rand(&secp_ctx, &hash_to_message!(sighash.as_byte_array()), &our_htlc_private_key, &self))
+ let sighash = hash_to_message!(sighash.as_byte_array());
+ Ok(sign_with_aux_rand(&secp_ctx, &sighash, &our_htlc_private_key, &self))
}
- fn sign_counterparty_htlc_transaction(&self, htlc_tx: &Transaction, input: usize, amount: u64, per_commitment_point: &PublicKey, htlc: &HTLCOutputInCommitment, secp_ctx: &Secp256k1<secp256k1::All>) -> Result<Signature, ()> {
- let htlc_key = chan_utils::derive_private_key(&secp_ctx, &per_commitment_point, &self.htlc_base_key);
+ fn sign_counterparty_htlc_transaction(
+ &self, htlc_tx: &Transaction, input: usize, amount: u64, per_commitment_point: &PublicKey,
+ htlc: &HTLCOutputInCommitment, secp_ctx: &Secp256k1<secp256k1::All>,
+ ) -> Result<Signature, ()> {
+ let htlc_key =
+ chan_utils::derive_private_key(&secp_ctx, &per_commitment_point, &self.htlc_base_key);
let revocation_pubkey = RevocationKey::from_basepoint(
- &secp_ctx, &self.pubkeys().revocation_basepoint, &per_commitment_point,
+ &secp_ctx,
+ &self.pubkeys().revocation_basepoint,
+ &per_commitment_point,
);
let counterparty_keys = self.counterparty_pubkeys().expect(MISSING_PARAMS_ERR);
let counterparty_htlcpubkey = HtlcKey::from_basepoint(
- &secp_ctx, &counterparty_keys.htlc_basepoint, &per_commitment_point,
+ &secp_ctx,
+ &counterparty_keys.htlc_basepoint,
+ &per_commitment_point,
);
- let htlcpubkey = HtlcKey::from_basepoint(&secp_ctx, &self.pubkeys().htlc_basepoint, &per_commitment_point);
+ let htlc_basepoint = self.pubkeys().htlc_basepoint;
+ let htlcpubkey = HtlcKey::from_basepoint(&secp_ctx, &htlc_basepoint, &per_commitment_point);
let chan_type = self.channel_type_features().expect(MISSING_PARAMS_ERR);
- let witness_script = chan_utils::get_htlc_redeemscript_with_explicit_keys(&htlc, chan_type, &counterparty_htlcpubkey, &htlcpubkey, &revocation_pubkey);
+ let witness_script = chan_utils::get_htlc_redeemscript_with_explicit_keys(
+ &htlc,
+ chan_type,
+ &counterparty_htlcpubkey,
+ &htlcpubkey,
+ &revocation_pubkey,
+ );
let mut sighash_parts = sighash::SighashCache::new(htlc_tx);
- let sighash = hash_to_message!(&sighash_parts.segwit_signature_hash(input, &witness_script, amount, EcdsaSighashType::All).unwrap()[..]);
+ let sighash = hash_to_message!(
+ &sighash_parts
+ .segwit_signature_hash(input, &witness_script, amount, EcdsaSighashType::All)
+ .unwrap()[..]
+ );
Ok(sign_with_aux_rand(secp_ctx, &sighash, &htlc_key, &self))
}
- fn sign_closing_transaction(&self, closing_tx: &ClosingTransaction, secp_ctx: &Secp256k1<secp256k1::All>) -> Result<Signature, ()> {
+ fn sign_closing_transaction(
+ &self, closing_tx: &ClosingTransaction, secp_ctx: &Secp256k1<secp256k1::All>,
+ ) -> Result<Signature, ()> {
let funding_pubkey = PublicKey::from_secret_key(secp_ctx, &self.funding_key);
- let counterparty_funding_key = &self.counterparty_pubkeys().expect(MISSING_PARAMS_ERR).funding_pubkey;
- let channel_funding_redeemscript = make_funding_redeemscript(&funding_pubkey, counterparty_funding_key);
- Ok(closing_tx.trust().sign(&self.funding_key, &channel_funding_redeemscript, self.channel_value_satoshis, secp_ctx))
+ let counterparty_funding_key =
+ &self.counterparty_pubkeys().expect(MISSING_PARAMS_ERR).funding_pubkey;
+ let channel_funding_redeemscript =
+ make_funding_redeemscript(&funding_pubkey, counterparty_funding_key);
+ Ok(closing_tx.trust().sign(
+ &self.funding_key,
+ &channel_funding_redeemscript,
+ self.channel_value_satoshis,
+ secp_ctx,
+ ))
}
fn sign_holder_anchor_input(
&self, anchor_tx: &Transaction, input: usize, secp_ctx: &Secp256k1<secp256k1::All>,
) -> Result<Signature, ()> {
- let witness_script = chan_utils::get_anchor_redeemscript(&self.holder_channel_pubkeys.funding_pubkey);
- let sighash = sighash::SighashCache::new(&*anchor_tx).segwit_signature_hash(
- input, &witness_script, ANCHOR_OUTPUT_VALUE_SATOSHI, EcdsaSighashType::All,
- ).unwrap();
+ let witness_script =
+ chan_utils::get_anchor_redeemscript(&self.holder_channel_pubkeys.funding_pubkey);
+ let sighash = sighash::SighashCache::new(&*anchor_tx)
+ .segwit_signature_hash(
+ input,
+ &witness_script,
+ ANCHOR_OUTPUT_VALUE_SATOSHI,
+ EcdsaSighashType::All,
+ )
+ .unwrap();
Ok(sign_with_aux_rand(secp_ctx, &hash_to_message!(&sighash[..]), &self.funding_key, &self))
}
fn sign_channel_announcement_with_funding_key(
- &self, msg: &UnsignedChannelAnnouncement, secp_ctx: &Secp256k1<secp256k1::All>
+ &self, msg: &UnsignedChannelAnnouncement, secp_ctx: &Secp256k1<secp256k1::All>,
) -> Result<Signature, ()> {
let msghash = hash_to_message!(&Sha256dHash::hash(&msg.encode()[..])[..]);
Ok(secp_ctx.sign_ecdsa(&msghash, &self.funding_key))
#[cfg(taproot)]
impl TaprootChannelSigner for InMemorySigner {
- fn generate_local_nonce_pair(&self, commitment_number: u64, secp_ctx: &Secp256k1<All>) -> PublicNonce {
+ fn generate_local_nonce_pair(
+ &self, commitment_number: u64, secp_ctx: &Secp256k1<All>,
+ ) -> PublicNonce {
todo!()
}
- fn partially_sign_counterparty_commitment(&self, counterparty_nonce: PublicNonce, commitment_tx: &CommitmentTransaction, inbound_htlc_preimages: Vec<PaymentPreimage>, outbound_htlc_preimages: Vec<PaymentPreimage>, secp_ctx: &Secp256k1<All>) -> Result<(PartialSignatureWithNonce, Vec<schnorr::Signature>), ()> {
+ fn partially_sign_counterparty_commitment(
+ &self, counterparty_nonce: PublicNonce, commitment_tx: &CommitmentTransaction,
+ inbound_htlc_preimages: Vec<PaymentPreimage>,
+ outbound_htlc_preimages: Vec<PaymentPreimage>, secp_ctx: &Secp256k1<All>,
+ ) -> Result<(PartialSignatureWithNonce, Vec<schnorr::Signature>), ()> {
todo!()
}
- fn finalize_holder_commitment(&self, commitment_tx: &HolderCommitmentTransaction, counterparty_partial_signature: PartialSignatureWithNonce, secp_ctx: &Secp256k1<All>) -> Result<PartialSignature, ()> {
+ fn finalize_holder_commitment(
+ &self, commitment_tx: &HolderCommitmentTransaction,
+ counterparty_partial_signature: PartialSignatureWithNonce, secp_ctx: &Secp256k1<All>,
+ ) -> Result<PartialSignature, ()> {
todo!()
}
- fn sign_justice_revoked_output(&self, justice_tx: &Transaction, input: usize, amount: u64, per_commitment_key: &SecretKey, secp_ctx: &Secp256k1<All>) -> Result<schnorr::Signature, ()> {
+ fn sign_justice_revoked_output(
+ &self, justice_tx: &Transaction, input: usize, amount: u64, per_commitment_key: &SecretKey,
+ secp_ctx: &Secp256k1<All>,
+ ) -> Result<schnorr::Signature, ()> {
todo!()
}
- fn sign_justice_revoked_htlc(&self, justice_tx: &Transaction, input: usize, amount: u64, per_commitment_key: &SecretKey, htlc: &HTLCOutputInCommitment, secp_ctx: &Secp256k1<All>) -> Result<schnorr::Signature, ()> {
+ fn sign_justice_revoked_htlc(
+ &self, justice_tx: &Transaction, input: usize, amount: u64, per_commitment_key: &SecretKey,
+ htlc: &HTLCOutputInCommitment, secp_ctx: &Secp256k1<All>,
+ ) -> Result<schnorr::Signature, ()> {
todo!()
}
- fn sign_holder_htlc_transaction(&self, htlc_tx: &Transaction, input: usize, htlc_descriptor: &HTLCDescriptor, secp_ctx: &Secp256k1<All>) -> Result<schnorr::Signature, ()> {
+ fn sign_holder_htlc_transaction(
+ &self, htlc_tx: &Transaction, input: usize, htlc_descriptor: &HTLCDescriptor,
+ secp_ctx: &Secp256k1<All>,
+ ) -> Result<schnorr::Signature, ()> {
todo!()
}
- fn sign_counterparty_htlc_transaction(&self, htlc_tx: &Transaction, input: usize, amount: u64, per_commitment_point: &PublicKey, htlc: &HTLCOutputInCommitment, secp_ctx: &Secp256k1<All>) -> Result<schnorr::Signature, ()> {
+ fn sign_counterparty_htlc_transaction(
+ &self, htlc_tx: &Transaction, input: usize, amount: u64, per_commitment_point: &PublicKey,
+ htlc: &HTLCOutputInCommitment, secp_ctx: &Secp256k1<All>,
+ ) -> Result<schnorr::Signature, ()> {
todo!()
}
- fn partially_sign_closing_transaction(&self, closing_tx: &ClosingTransaction, secp_ctx: &Secp256k1<All>) -> Result<PartialSignature, ()> {
+ fn partially_sign_closing_transaction(
+ &self, closing_tx: &ClosingTransaction, secp_ctx: &Secp256k1<All>,
+ ) -> Result<PartialSignature, ()> {
todo!()
}
- fn sign_holder_anchor_input(&self, anchor_tx: &Transaction, input: usize, secp_ctx: &Secp256k1<All>) -> Result<schnorr::Signature, ()> {
+ fn sign_holder_anchor_input(
+ &self, anchor_tx: &Transaction, input: usize, secp_ctx: &Secp256k1<All>,
+ ) -> Result<schnorr::Signature, ()> {
todo!()
}
}
}
}
-impl<ES: Deref> ReadableArgs<ES> for InMemorySigner where ES::Target: EntropySource {
+impl<ES: Deref> ReadableArgs<ES> for InMemorySigner
+where
+ ES::Target: EntropySource,
+{
fn read<R: io::Read>(reader: &mut R, entropy_source: ES) -> Result<Self, DecodeError> {
let _ver = read_ver_prefix!(reader, SERIALIZATION_VERSION);
let counterparty_channel_data = Readable::read(reader)?;
let channel_value_satoshis = Readable::read(reader)?;
let secp_ctx = Secp256k1::signing_only();
- let holder_channel_pubkeys =
- InMemorySigner::make_holder_keys(&secp_ctx, &funding_key, &revocation_base_key,
- &payment_key, &delayed_payment_base_key, &htlc_base_key);
+ let holder_channel_pubkeys = InMemorySigner::make_holder_keys(
+ &secp_ctx,
+ &funding_key,
+ &revocation_base_key,
+ &payment_key,
+ &delayed_payment_base_key,
+ &htlc_base_key,
+ );
let keys_id = Readable::read(reader)?;
read_tlv_fields!(reader, {});
holder_channel_pubkeys,
channel_parameters: counterparty_channel_data,
channel_keys_id: keys_id,
- rand_bytes_unique_start: entropy_source.get_secure_random_bytes(),
- rand_bytes_index: AtomicCounter::new(),
+ entropy_source: RandomBytes::new(entropy_source.get_secure_random_bytes()),
})
}
}
channel_master_key: ExtendedPrivKey,
channel_child_index: AtomicUsize,
- rand_bytes_unique_start: [u8; 32],
- rand_bytes_index: AtomicCounter,
+ entropy_source: RandomBytes,
seed: [u8; 32],
starting_time_secs: u64,
// Note that when we aren't serializing the key, network doesn't matter
match ExtendedPrivKey::new_master(Network::Testnet, seed) {
Ok(master_key) => {
- let node_secret = master_key.ckd_priv(&secp_ctx, ChildNumber::from_hardened_idx(0).unwrap()).expect("Your RNG is busted").private_key;
+ let node_secret = master_key
+ .ckd_priv(&secp_ctx, ChildNumber::from_hardened_idx(0).unwrap())
+ .expect("Your RNG is busted")
+ .private_key;
let node_id = PublicKey::from_secret_key(&secp_ctx, &node_secret);
- let destination_script = match master_key.ckd_priv(&secp_ctx, ChildNumber::from_hardened_idx(1).unwrap()) {
+ let destination_script = match master_key
+ .ckd_priv(&secp_ctx, ChildNumber::from_hardened_idx(1).unwrap())
+ {
Ok(destination_key) => {
- let wpubkey_hash = WPubkeyHash::hash(&ExtendedPubKey::from_priv(&secp_ctx, &destination_key).to_pub().to_bytes());
- Builder::new().push_opcode(opcodes::all::OP_PUSHBYTES_0)
+ let wpubkey_hash = WPubkeyHash::hash(
+ &ExtendedPubKey::from_priv(&secp_ctx, &destination_key)
+ .to_pub()
+ .to_bytes(),
+ );
+ Builder::new()
+ .push_opcode(opcodes::all::OP_PUSHBYTES_0)
.push_slice(&wpubkey_hash.to_byte_array())
.into_script()
},
Err(_) => panic!("Your RNG is busted"),
};
- let shutdown_pubkey = match master_key.ckd_priv(&secp_ctx, ChildNumber::from_hardened_idx(2).unwrap()) {
- Ok(shutdown_key) => ExtendedPubKey::from_priv(&secp_ctx, &shutdown_key).public_key,
+ let shutdown_pubkey = match master_key
+ .ckd_priv(&secp_ctx, ChildNumber::from_hardened_idx(2).unwrap())
+ {
+ Ok(shutdown_key) => {
+ ExtendedPubKey::from_priv(&secp_ctx, &shutdown_key).public_key
+ },
Err(_) => panic!("Your RNG is busted"),
};
- let channel_master_key = master_key.ckd_priv(&secp_ctx, ChildNumber::from_hardened_idx(3).unwrap()).expect("Your RNG is busted");
- let inbound_payment_key: SecretKey = master_key.ckd_priv(&secp_ctx, ChildNumber::from_hardened_idx(5).unwrap()).expect("Your RNG is busted").private_key;
+ let channel_master_key = master_key
+ .ckd_priv(&secp_ctx, ChildNumber::from_hardened_idx(3).unwrap())
+ .expect("Your RNG is busted");
+ let inbound_payment_key: SecretKey = master_key
+ .ckd_priv(&secp_ctx, ChildNumber::from_hardened_idx(5).unwrap())
+ .expect("Your RNG is busted")
+ .private_key;
let mut inbound_pmt_key_bytes = [0; 32];
inbound_pmt_key_bytes.copy_from_slice(&inbound_payment_key[..]);
rand_bytes_engine.input(&starting_time_nanos.to_be_bytes());
rand_bytes_engine.input(seed);
rand_bytes_engine.input(b"LDK PRNG Seed");
- let rand_bytes_unique_start = Sha256::from_engine(rand_bytes_engine).to_byte_array();
+ let rand_bytes_unique_start =
+ Sha256::from_engine(rand_bytes_engine).to_byte_array();
let mut res = KeysManager {
secp_ctx,
channel_master_key,
channel_child_index: AtomicUsize::new(0),
- rand_bytes_unique_start,
- rand_bytes_index: AtomicCounter::new(),
+ entropy_source: RandomBytes::new(rand_bytes_unique_start),
seed: *seed,
starting_time_secs,
}
	/// Derive an old [`WriteableEcdsaChannelSigner`] containing per-channel secrets based on key derivation parameters.
- pub fn derive_channel_keys(&self, channel_value_satoshis: u64, params: &[u8; 32]) -> InMemorySigner {
+ pub fn derive_channel_keys(
+ &self, channel_value_satoshis: u64, params: &[u8; 32],
+ ) -> InMemorySigner {
let chan_id = u64::from_be_bytes(params[0..8].try_into().unwrap());
let mut unique_start = Sha256::engine();
unique_start.input(params);
// We only seriously intend to rely on the channel_master_key for true secure
// entropy, everything else just ensures uniqueness. We rely on the unique_start (ie
// starting_time provided in the constructor) to be unique.
- let child_privkey = self.channel_master_key.ckd_priv(&self.secp_ctx,
- ChildNumber::from_hardened_idx((chan_id as u32) % (1 << 31)).expect("key space exhausted")
- ).expect("Your RNG is busted");
+ let child_privkey = self
+ .channel_master_key
+ .ckd_priv(
+ &self.secp_ctx,
+ ChildNumber::from_hardened_idx((chan_id as u32) % (1 << 31))
+ .expect("key space exhausted"),
+ )
+ .expect("Your RNG is busted");
unique_start.input(&child_privkey.private_key[..]);
let seed = Sha256::from_engine(unique_start).to_byte_array();
sha.input(&seed);
sha.input(&$prev_key[..]);
sha.input(&$info[..]);
- SecretKey::from_slice(&Sha256::from_engine(sha).to_byte_array()).expect("SHA-256 is busted")
- }}
+ SecretKey::from_slice(&Sha256::from_engine(sha).to_byte_array())
+ .expect("SHA-256 is busted")
+ }};
}
let funding_key = key_step!(b"funding key", commitment_seed);
let revocation_base_key = key_step!(b"revocation base key", funding_key);
///
/// May panic if the [`SpendableOutputDescriptor`]s were not generated by channels which used
/// this [`KeysManager`] or one of the [`InMemorySigner`] created by this [`KeysManager`].
- pub fn sign_spendable_outputs_psbt<C: Signing>(&self, descriptors: &[&SpendableOutputDescriptor], mut psbt: PartiallySignedTransaction, secp_ctx: &Secp256k1<C>) -> Result<PartiallySignedTransaction, ()> {
+ pub fn sign_spendable_outputs_psbt<C: Signing>(
+ &self, descriptors: &[&SpendableOutputDescriptor], mut psbt: PartiallySignedTransaction,
+ secp_ctx: &Secp256k1<C>,
+ ) -> Result<PartiallySignedTransaction, ()> {
let mut keys_cache: Option<(InMemorySigner, [u8; 32])> = None;
for outp in descriptors {
+ let get_input_idx = |outpoint: &OutPoint| {
+ psbt.unsigned_tx
+ .input
+ .iter()
+ .position(|i| i.previous_output == outpoint.into_bitcoin_outpoint())
+ .ok_or(())
+ };
match outp {
SpendableOutputDescriptor::StaticPaymentOutput(descriptor) => {
- let input_idx = psbt.unsigned_tx.input.iter().position(|i| i.previous_output == descriptor.outpoint.into_bitcoin_outpoint()).ok_or(())?;
- if keys_cache.is_none() || keys_cache.as_ref().unwrap().1 != descriptor.channel_keys_id {
- let mut signer = self.derive_channel_keys(descriptor.channel_value_satoshis, &descriptor.channel_keys_id);
- if let Some(channel_params) = descriptor.channel_transaction_parameters.as_ref() {
+ let input_idx = get_input_idx(&descriptor.outpoint)?;
+ if keys_cache.is_none()
+ || keys_cache.as_ref().unwrap().1 != descriptor.channel_keys_id
+ {
+ let mut signer = self.derive_channel_keys(
+ descriptor.channel_value_satoshis,
+ &descriptor.channel_keys_id,
+ );
+ if let Some(channel_params) =
+ descriptor.channel_transaction_parameters.as_ref()
+ {
signer.provide_channel_parameters(channel_params);
}
keys_cache = Some((signer, descriptor.channel_keys_id));
}
- let witness = keys_cache.as_ref().unwrap().0.sign_counterparty_payment_input(&psbt.unsigned_tx, input_idx, &descriptor, &secp_ctx)?;
+ let witness = keys_cache.as_ref().unwrap().0.sign_counterparty_payment_input(
+ &psbt.unsigned_tx,
+ input_idx,
+ &descriptor,
+ &secp_ctx,
+ )?;
psbt.inputs[input_idx].final_script_witness = Some(witness);
},
SpendableOutputDescriptor::DelayedPaymentOutput(descriptor) => {
- let input_idx = psbt.unsigned_tx.input.iter().position(|i| i.previous_output == descriptor.outpoint.into_bitcoin_outpoint()).ok_or(())?;
- if keys_cache.is_none() || keys_cache.as_ref().unwrap().1 != descriptor.channel_keys_id {
+ let input_idx = get_input_idx(&descriptor.outpoint)?;
+ if keys_cache.is_none()
+ || keys_cache.as_ref().unwrap().1 != descriptor.channel_keys_id
+ {
keys_cache = Some((
- self.derive_channel_keys(descriptor.channel_value_satoshis, &descriptor.channel_keys_id),
- descriptor.channel_keys_id));
+ self.derive_channel_keys(
+ descriptor.channel_value_satoshis,
+ &descriptor.channel_keys_id,
+ ),
+ descriptor.channel_keys_id,
+ ));
}
- let witness = keys_cache.as_ref().unwrap().0.sign_dynamic_p2wsh_input(&psbt.unsigned_tx, input_idx, &descriptor, &secp_ctx)?;
+ let witness = keys_cache.as_ref().unwrap().0.sign_dynamic_p2wsh_input(
+ &psbt.unsigned_tx,
+ input_idx,
+ &descriptor,
+ &secp_ctx,
+ )?;
psbt.inputs[input_idx].final_script_witness = Some(witness);
},
SpendableOutputDescriptor::StaticOutput { ref outpoint, ref output, .. } => {
- let input_idx = psbt.unsigned_tx.input.iter().position(|i| i.previous_output == outpoint.into_bitcoin_outpoint()).ok_or(())?;
- let derivation_idx = if output.script_pubkey == self.destination_script {
- 1
- } else {
- 2
- };
+ let input_idx = get_input_idx(outpoint)?;
+ let derivation_idx =
+ if output.script_pubkey == self.destination_script { 1 } else { 2 };
let secret = {
// Note that when we aren't serializing the key, network doesn't matter
match ExtendedPrivKey::new_master(Network::Testnet, &self.seed) {
Ok(master_key) => {
- match master_key.ckd_priv(&secp_ctx, ChildNumber::from_hardened_idx(derivation_idx).expect("key space exhausted")) {
+ match master_key.ckd_priv(
+ &secp_ctx,
+ ChildNumber::from_hardened_idx(derivation_idx)
+ .expect("key space exhausted"),
+ ) {
Ok(key) => key,
Err(_) => panic!("Your RNG is busted"),
}
- }
+ },
Err(_) => panic!("Your rng is busted"),
}
};
if derivation_idx == 2 {
assert_eq!(pubkey.inner, self.shutdown_pubkey);
}
- let witness_script = bitcoin::Address::p2pkh(&pubkey, Network::Testnet).script_pubkey();
- let payment_script = bitcoin::Address::p2wpkh(&pubkey, Network::Testnet).expect("uncompressed key found").script_pubkey();
-
- if payment_script != output.script_pubkey { return Err(()); };
+ let witness_script =
+ bitcoin::Address::p2pkh(&pubkey, Network::Testnet).script_pubkey();
+ let payment_script = bitcoin::Address::p2wpkh(&pubkey, Network::Testnet)
+ .expect("uncompressed key found")
+ .script_pubkey();
+
+ if payment_script != output.script_pubkey {
+ return Err(());
+ };
- let sighash = hash_to_message!(&sighash::SighashCache::new(&psbt.unsigned_tx).segwit_signature_hash(input_idx, &witness_script, output.value, EcdsaSighashType::All).unwrap()[..]);
+ let sighash = hash_to_message!(
+ &sighash::SighashCache::new(&psbt.unsigned_tx)
+ .segwit_signature_hash(
+ input_idx,
+ &witness_script,
+ output.value,
+ EcdsaSighashType::All
+ )
+ .unwrap()[..]
+ );
let sig = sign_with_aux_rand(secp_ctx, &sighash, &secret.private_key, &self);
let mut sig_ser = sig.serialize_der().to_vec();
sig_ser.push(EcdsaSighashType::All as u8);
- let witness = Witness::from_slice(&[&sig_ser, &pubkey.inner.serialize().to_vec()]);
+ let witness =
+ Witness::from_slice(&[&sig_ser, &pubkey.inner.serialize().to_vec()]);
psbt.inputs[input_idx].final_script_witness = Some(witness);
},
}
Ok(psbt)
}
-
- /// Creates a [`Transaction`] which spends the given descriptors to the given outputs, plus an
- /// output to the given change destination (if sufficient change value remains). The
- /// transaction will have a feerate, at least, of the given value.
- ///
- /// The `locktime` argument is used to set the transaction's locktime. If `None`, the
- /// transaction will have a locktime of 0. It it recommended to set this to the current block
- /// height to avoid fee sniping, unless you have some specific reason to use a different
- /// locktime.
- ///
- /// Returns `Err(())` if the output value is greater than the input value minus required fee,
- /// if a descriptor was duplicated, or if an output descriptor `script_pubkey`
- /// does not match the one we can spend.
- ///
- /// We do not enforce that outputs meet the dust limit or that any output scripts are standard.
- ///
- /// May panic if the [`SpendableOutputDescriptor`]s were not generated by channels which used
- /// this [`KeysManager`] or one of the [`InMemorySigner`] created by this [`KeysManager`].
- pub fn spend_spendable_outputs<C: Signing>(&self, descriptors: &[&SpendableOutputDescriptor], outputs: Vec<TxOut>, change_destination_script: ScriptBuf, feerate_sat_per_1000_weight: u32, locktime: Option<LockTime>, secp_ctx: &Secp256k1<C>) -> Result<Transaction, ()> {
- let (mut psbt, expected_max_weight) = SpendableOutputDescriptor::create_spendable_outputs_psbt(descriptors, outputs, change_destination_script, feerate_sat_per_1000_weight, locktime)?;
- psbt = self.sign_spendable_outputs_psbt(descriptors, psbt, secp_ctx)?;
-
- let spend_tx = psbt.extract_tx();
-
- debug_assert!(expected_max_weight >= spend_tx.weight().to_wu());
- // Note that witnesses with a signature vary somewhat in size, so allow
- // `expected_max_weight` to overshoot by up to 3 bytes per input.
- debug_assert!(expected_max_weight <= spend_tx.weight().to_wu() + descriptors.len() as u64 * 3);
-
- Ok(spend_tx)
- }
}
impl EntropySource for KeysManager {
fn get_secure_random_bytes(&self) -> [u8; 32] {
- let index = self.rand_bytes_index.get_increment();
- let mut nonce = [0u8; 16];
- nonce[..8].copy_from_slice(&index.to_be_bytes());
- ChaCha20::get_single_block(&self.rand_bytes_unique_start, &nonce)
+ self.entropy_source.get_secure_random_bytes()
}
}
fn get_node_id(&self, recipient: Recipient) -> Result<PublicKey, ()> {
match recipient {
Recipient::Node => Ok(self.node_id.clone()),
- Recipient::PhantomNode => Err(())
+ Recipient::PhantomNode => Err(()),
}
}
- fn ecdh(&self, recipient: Recipient, other_key: &PublicKey, tweak: Option<&Scalar>) -> Result<SharedSecret, ()> {
+ fn ecdh(
+ &self, recipient: Recipient, other_key: &PublicKey, tweak: Option<&Scalar>,
+ ) -> Result<SharedSecret, ()> {
let mut node_secret = match recipient {
Recipient::Node => Ok(self.node_secret.clone()),
- Recipient::PhantomNode => Err(())
+ Recipient::PhantomNode => Err(()),
}?;
if let Some(tweak) = tweak {
node_secret = node_secret.mul_tweak(tweak).map_err(|_| ())?;
self.inbound_payment_key.clone()
}
- fn sign_invoice(&self, hrp_bytes: &[u8], invoice_data: &[u5], recipient: Recipient) -> Result<RecoverableSignature, ()> {
+ fn sign_invoice(
+ &self, hrp_bytes: &[u8], invoice_data: &[u5], recipient: Recipient,
+ ) -> Result<RecoverableSignature, ()> {
let preimage = construct_invoice_preimage(&hrp_bytes, &invoice_data);
let secret = match recipient {
Recipient::Node => Ok(&self.node_secret),
- Recipient::PhantomNode => Err(())
+ Recipient::PhantomNode => Err(()),
}?;
- Ok(self.secp_ctx.sign_ecdsa_recoverable(&hash_to_message!(&Sha256::hash(&preimage).to_byte_array()), secret))
+ Ok(self.secp_ctx.sign_ecdsa_recoverable(
+ &hash_to_message!(&Sha256::hash(&preimage).to_byte_array()),
+ secret,
+ ))
}
fn sign_bolt12_invoice_request(
- &self, invoice_request: &UnsignedInvoiceRequest
+ &self, invoice_request: &UnsignedInvoiceRequest,
) -> Result<schnorr::Signature, ()> {
let message = invoice_request.tagged_hash().as_digest();
let keys = KeyPair::from_secret_key(&self.secp_ctx, &self.node_secret);
}
fn sign_bolt12_invoice(
- &self, invoice: &UnsignedBolt12Invoice
+ &self, invoice: &UnsignedBolt12Invoice,
) -> Result<schnorr::Signature, ()> {
let message = invoice.tagged_hash().as_digest();
let keys = KeyPair::from_secret_key(&self.secp_ctx, &self.node_secret);
}
}
+impl OutputSpender for KeysManager {
+ /// Creates a [`Transaction`] which spends the given descriptors to the given outputs, plus an
+ /// output to the given change destination (if sufficient change value remains).
+ ///
+ /// See [`OutputSpender::spend_spendable_outputs`] documentation for more information.
+ ///
+ /// We do not enforce that outputs meet the dust limit or that any output scripts are standard.
+ ///
+ /// May panic if the [`SpendableOutputDescriptor`]s were not generated by channels which used
+ /// this [`KeysManager`] or one of the [`InMemorySigner`] created by this [`KeysManager`].
+ fn spend_spendable_outputs<C: Signing>(
+ &self, descriptors: &[&SpendableOutputDescriptor], outputs: Vec<TxOut>,
+ change_destination_script: ScriptBuf, feerate_sat_per_1000_weight: u32,
+ locktime: Option<LockTime>, secp_ctx: &Secp256k1<C>,
+ ) -> Result<Transaction, ()> {
+ let (mut psbt, expected_max_weight) =
+ SpendableOutputDescriptor::create_spendable_outputs_psbt(
+ secp_ctx,
+ descriptors,
+ outputs,
+ change_destination_script,
+ feerate_sat_per_1000_weight,
+ locktime,
+ )?;
+ psbt = self.sign_spendable_outputs_psbt(descriptors, psbt, secp_ctx)?;
+
+ let spend_tx = psbt.extract_tx();
+
+ debug_assert!(expected_max_weight >= spend_tx.weight().to_wu());
+ // Note that witnesses with a signature vary somewhat in size, so allow
+ // `expected_max_weight` to overshoot by up to 3 bytes per input.
+ debug_assert!(
+ expected_max_weight <= spend_tx.weight().to_wu() + descriptors.len() as u64 * 3
+ );
+
+ Ok(spend_tx)
+ }
+}
+
impl SignerProvider for KeysManager {
type EcdsaSigner = InMemorySigner;
#[cfg(taproot)]
type TaprootSigner = InMemorySigner;
- fn generate_channel_keys_id(&self, _inbound: bool, _channel_value_satoshis: u64, user_channel_id: u128) -> [u8; 32] {
+ fn generate_channel_keys_id(
+ &self, _inbound: bool, _channel_value_satoshis: u64, user_channel_id: u128,
+ ) -> [u8; 32] {
let child_idx = self.channel_child_index.fetch_add(1, Ordering::AcqRel);
// `child_idx` is the only thing guaranteed to make each channel unique without a restart
// (though `user_channel_id` should help, depending on user behavior). If it manages to
id
}
- fn derive_channel_signer(&self, channel_value_satoshis: u64, channel_keys_id: [u8; 32]) -> Self::EcdsaSigner {
+ fn derive_channel_signer(
+ &self, channel_value_satoshis: u64, channel_keys_id: [u8; 32],
+ ) -> Self::EcdsaSigner {
self.derive_channel_keys(channel_value_satoshis, &channel_keys_id)
}
}
}
- fn ecdh(&self, recipient: Recipient, other_key: &PublicKey, tweak: Option<&Scalar>) -> Result<SharedSecret, ()> {
+ fn ecdh(
+ &self, recipient: Recipient, other_key: &PublicKey, tweak: Option<&Scalar>,
+ ) -> Result<SharedSecret, ()> {
let mut node_secret = match recipient {
Recipient::Node => self.inner.node_secret.clone(),
Recipient::PhantomNode => self.phantom_secret.clone(),
self.inbound_payment_key.clone()
}
- fn sign_invoice(&self, hrp_bytes: &[u8], invoice_data: &[u5], recipient: Recipient) -> Result<RecoverableSignature, ()> {
+ fn sign_invoice(
+ &self, hrp_bytes: &[u8], invoice_data: &[u5], recipient: Recipient,
+ ) -> Result<RecoverableSignature, ()> {
let preimage = construct_invoice_preimage(&hrp_bytes, &invoice_data);
let secret = match recipient {
Recipient::Node => &self.inner.node_secret,
Recipient::PhantomNode => &self.phantom_secret,
};
- Ok(self.inner.secp_ctx.sign_ecdsa_recoverable(&hash_to_message!(&Sha256::hash(&preimage).to_byte_array()), secret))
+ Ok(self.inner.secp_ctx.sign_ecdsa_recoverable(
+ &hash_to_message!(&Sha256::hash(&preimage).to_byte_array()),
+ secret,
+ ))
}
fn sign_bolt12_invoice_request(
- &self, invoice_request: &UnsignedInvoiceRequest
+ &self, invoice_request: &UnsignedInvoiceRequest,
) -> Result<schnorr::Signature, ()> {
self.inner.sign_bolt12_invoice_request(invoice_request)
}
fn sign_bolt12_invoice(
- &self, invoice: &UnsignedBolt12Invoice
+ &self, invoice: &UnsignedBolt12Invoice,
) -> Result<schnorr::Signature, ()> {
self.inner.sign_bolt12_invoice(invoice)
}
}
}
+impl OutputSpender for PhantomKeysManager {
+ /// See [`OutputSpender::spend_spendable_outputs`] and [`KeysManager::spend_spendable_outputs`]
+ /// for documentation on this method.
+ fn spend_spendable_outputs<C: Signing>(
+ &self, descriptors: &[&SpendableOutputDescriptor], outputs: Vec<TxOut>,
+ change_destination_script: ScriptBuf, feerate_sat_per_1000_weight: u32,
+ locktime: Option<LockTime>, secp_ctx: &Secp256k1<C>,
+ ) -> Result<Transaction, ()> {
+ self.inner.spend_spendable_outputs(
+ descriptors,
+ outputs,
+ change_destination_script,
+ feerate_sat_per_1000_weight,
+ locktime,
+ secp_ctx,
+ )
+ }
+}
+
impl SignerProvider for PhantomKeysManager {
type EcdsaSigner = InMemorySigner;
#[cfg(taproot)]
type TaprootSigner = InMemorySigner;
- fn generate_channel_keys_id(&self, inbound: bool, channel_value_satoshis: u64, user_channel_id: u128) -> [u8; 32] {
+ fn generate_channel_keys_id(
+ &self, inbound: bool, channel_value_satoshis: u64, user_channel_id: u128,
+ ) -> [u8; 32] {
self.inner.generate_channel_keys_id(inbound, channel_value_satoshis, user_channel_id)
}
- fn derive_channel_signer(&self, channel_value_satoshis: u64, channel_keys_id: [u8; 32]) -> Self::EcdsaSigner {
+ fn derive_channel_signer(
+ &self, channel_value_satoshis: u64, channel_keys_id: [u8; 32],
+ ) -> Self::EcdsaSigner {
self.inner.derive_channel_signer(channel_value_satoshis, channel_keys_id)
}
/// same across restarts, or else inbound payments may fail.
///
/// [phantom node payments]: PhantomKeysManager
- pub fn new(seed: &[u8; 32], starting_time_secs: u64, starting_time_nanos: u32, cross_node_seed: &[u8; 32]) -> Self {
+ pub fn new(
+ seed: &[u8; 32], starting_time_secs: u64, starting_time_nanos: u32,
+ cross_node_seed: &[u8; 32],
+ ) -> Self {
let inner = KeysManager::new(seed, starting_time_secs, starting_time_nanos);
- let (inbound_key, phantom_key) = hkdf_extract_expand_twice(b"LDK Inbound and Phantom Payment Key Expansion", cross_node_seed);
+ let (inbound_key, phantom_key) = hkdf_extract_expand_twice(
+ b"LDK Inbound and Phantom Payment Key Expansion",
+ cross_node_seed,
+ );
let phantom_secret = SecretKey::from_slice(&phantom_key).unwrap();
let phantom_node_id = PublicKey::from_secret_key(&inner.secp_ctx, &phantom_secret);
Self {
}
}
- /// See [`KeysManager::spend_spendable_outputs`] for documentation on this method.
- pub fn spend_spendable_outputs<C: Signing>(&self, descriptors: &[&SpendableOutputDescriptor], outputs: Vec<TxOut>, change_destination_script: ScriptBuf, feerate_sat_per_1000_weight: u32, locktime: Option<LockTime>, secp_ctx: &Secp256k1<C>) -> Result<Transaction, ()> {
- self.inner.spend_spendable_outputs(descriptors, outputs, change_destination_script, feerate_sat_per_1000_weight, locktime, secp_ctx)
- }
-
/// See [`KeysManager::derive_channel_keys`] for documentation on this method.
- pub fn derive_channel_keys(&self, channel_value_satoshis: u64, params: &[u8; 32]) -> InMemorySigner {
+ pub fn derive_channel_keys(
+ &self, channel_value_satoshis: u64, params: &[u8; 32],
+ ) -> InMemorySigner {
self.inner.derive_channel_keys(channel_value_satoshis, params)
}
}
}
+/// An implementation of [`EntropySource`] using ChaCha20.
+#[derive(Debug)]
+pub struct RandomBytes {
+ /// Seed from which all randomness produced is derived from.
+ seed: [u8; 32],
+ /// Tracks the number of times we've produced randomness to ensure we don't return the same
+ /// bytes twice.
+ index: AtomicCounter,
+}
+
+impl RandomBytes {
+ /// Creates a new instance using the given seed.
+ pub fn new(seed: [u8; 32]) -> Self {
+ Self { seed, index: AtomicCounter::new() }
+ }
+}
+
+impl EntropySource for RandomBytes {
+ fn get_secure_random_bytes(&self) -> [u8; 32] {
+ let index = self.index.get_increment();
+ let mut nonce = [0u8; 16];
+ nonce[..8].copy_from_slice(&index.to_be_bytes());
+ ChaCha20::get_single_block(&self.seed, &nonce)
+ }
+}
+
// Ensure that EcdsaChannelSigner can have a vtable
#[test]
pub fn dyn_sign() {
#[cfg(ldk_bench)]
pub mod benches {
- use std::sync::{Arc, mpsc};
+ use crate::sign::{EntropySource, KeysManager};
+ use bitcoin::blockdata::constants::genesis_block;
+ use bitcoin::Network;
use std::sync::mpsc::TryRecvError;
+ use std::sync::{mpsc, Arc};
use std::thread;
use std::time::Duration;
- use bitcoin::blockdata::constants::genesis_block;
- use bitcoin::Network;
- use crate::sign::{EntropySource, KeysManager};
use criterion::Criterion;
for _ in 1..5 {
let keys_manager_clone = Arc::clone(&keys_manager);
let (stop_sender, stop_receiver) = mpsc::channel();
- let handle = thread::spawn(move || {
- loop {
- keys_manager_clone.get_secure_random_bytes();
- match stop_receiver.try_recv() {
- Ok(_) | Err(TryRecvError::Disconnected) => {
- println!("Terminating.");
- break;
- }
- Err(TryRecvError::Empty) => {}
- }
+ let handle = thread::spawn(move || loop {
+ keys_manager_clone.get_secure_random_bytes();
+ match stop_receiver.try_recv() {
+ Ok(_) | Err(TryRecvError::Disconnected) => {
+ println!("Terminating.");
+ break;
+ },
+ Err(TryRecvError::Empty) => {},
}
});
handles.push(handle);
stops.push(stop_sender);
}
- bench.bench_function("get_secure_random_bytes", |b| b.iter(||
- keys_manager.get_secure_random_bytes()));
+ bench.bench_function("get_secure_random_bytes", |b| {
+ b.iter(|| keys_manager.get_secure_random_bytes())
+ });
for stop in stops {
let _ = stop.send(());
use alloc::vec::Vec;
use bitcoin::blockdata::transaction::Transaction;
use bitcoin::secp256k1;
-use bitcoin::secp256k1::{PublicKey, schnorr::Signature, Secp256k1, SecretKey};
+use bitcoin::secp256k1::{schnorr::Signature, PublicKey, Secp256k1, SecretKey};
use musig2::types::{PartialSignature, PublicNonce};
-use crate::ln::chan_utils::{ClosingTransaction, CommitmentTransaction, HolderCommitmentTransaction, HTLCOutputInCommitment};
+use crate::ln::chan_utils::{
+ ClosingTransaction, CommitmentTransaction, HTLCOutputInCommitment, HolderCommitmentTransaction,
+};
use crate::ln::msgs::PartialSignatureWithNonce;
use crate::ln::PaymentPreimage;
use crate::sign::{ChannelSigner, HTLCDescriptor};
pub trait TaprootChannelSigner: ChannelSigner {
/// Generate a local nonce pair, which requires committing to ahead of time.
/// The counterparty needs the public nonce generated herein to compute a partial signature.
- fn generate_local_nonce_pair(&self, commitment_number: u64, secp_ctx: &Secp256k1<secp256k1::All>) -> PublicNonce;
+ fn generate_local_nonce_pair(
+ &self, commitment_number: u64, secp_ctx: &Secp256k1<secp256k1::All>,
+ ) -> PublicNonce;
/// Create a signature for a counterparty's commitment transaction and associated HTLC transactions.
///
/// irrelevant or duplicate preimages.
//
// TODO: Document the things someone using this interface should enforce before signing.
- fn partially_sign_counterparty_commitment(&self, counterparty_nonce: PublicNonce,
- commitment_tx: &CommitmentTransaction,
+ fn partially_sign_counterparty_commitment(
+ &self, counterparty_nonce: PublicNonce, commitment_tx: &CommitmentTransaction,
inbound_htlc_preimages: Vec<PaymentPreimage>,
outbound_htlc_preimages: Vec<PaymentPreimage>, secp_ctx: &Secp256k1<secp256k1::All>,
) -> Result<(PartialSignatureWithNonce, Vec<Signature>), ()>;
/// An external signer implementation should check that the commitment has not been revoked.
///
// TODO: Document the things someone using this interface should enforce before signing.
- fn finalize_holder_commitment(&self, commitment_tx: &HolderCommitmentTransaction,
+ fn finalize_holder_commitment(
+ &self, commitment_tx: &HolderCommitmentTransaction,
counterparty_partial_signature: PartialSignatureWithNonce,
- secp_ctx: &Secp256k1<secp256k1::All>
+ secp_ctx: &Secp256k1<secp256k1::All>,
) -> Result<PartialSignature, ()>;
/// Create a signature for the given input in a transaction spending an HTLC transaction output
/// revoked the state which they eventually broadcast. It's not a _holder_ secret key and does
/// not allow the spending of any funds by itself (you need our holder `revocation_secret` to do
/// so).
- fn sign_justice_revoked_output(&self, justice_tx: &Transaction, input: usize, amount: u64,
- per_commitment_key: &SecretKey, secp_ctx: &Secp256k1<secp256k1::All>,
+ fn sign_justice_revoked_output(
+ &self, justice_tx: &Transaction, input: usize, amount: u64, per_commitment_key: &SecretKey,
+ secp_ctx: &Secp256k1<secp256k1::All>,
) -> Result<Signature, ()>;
/// Create a signature for the given input in a transaction spending a commitment transaction
///
/// `htlc` holds HTLC elements (hash, timelock), thus changing the format of the witness script
/// (which is committed to in the BIP 341 signatures).
- fn sign_justice_revoked_htlc(&self, justice_tx: &Transaction, input: usize, amount: u64,
- per_commitment_key: &SecretKey, htlc: &HTLCOutputInCommitment,
- secp_ctx: &Secp256k1<secp256k1::All>) -> Result<Signature, ()>;
+ fn sign_justice_revoked_htlc(
+ &self, justice_tx: &Transaction, input: usize, amount: u64, per_commitment_key: &SecretKey,
+ htlc: &HTLCOutputInCommitment, secp_ctx: &Secp256k1<secp256k1::All>,
+ ) -> Result<Signature, ()>;
/// Computes the signature for a commitment transaction's HTLC output used as an input within
/// `htlc_tx`, which spends the commitment transaction at index `input`. The signature returned
///
/// [`TapSighashType::Default`]: bitcoin::sighash::TapSighashType::Default
/// [`ChannelMonitor`]: crate::chain::channelmonitor::ChannelMonitor
- fn sign_holder_htlc_transaction(&self, htlc_tx: &Transaction, input: usize,
- htlc_descriptor: &HTLCDescriptor, secp_ctx: &Secp256k1<secp256k1::All>,
+ fn sign_holder_htlc_transaction(
+ &self, htlc_tx: &Transaction, input: usize, htlc_descriptor: &HTLCDescriptor,
+ secp_ctx: &Secp256k1<secp256k1::All>,
) -> Result<Signature, ()>;
/// Create a signature for a claiming transaction for a HTLC output on a counterparty's commitment
/// detected onchain. It has been generated by our counterparty and is used to derive
/// channel state keys, which are then included in the witness script and committed to in the
/// BIP 341 signature.
- fn sign_counterparty_htlc_transaction(&self, htlc_tx: &Transaction, input: usize, amount: u64,
- per_commitment_point: &PublicKey, htlc: &HTLCOutputInCommitment,
- secp_ctx: &Secp256k1<secp256k1::All>) -> Result<Signature, ()>;
+ fn sign_counterparty_htlc_transaction(
+ &self, htlc_tx: &Transaction, input: usize, amount: u64, per_commitment_point: &PublicKey,
+ htlc: &HTLCOutputInCommitment, secp_ctx: &Secp256k1<secp256k1::All>,
+ ) -> Result<Signature, ()>;
/// Create a signature for a (proposed) closing transaction.
///
/// Note that, due to rounding, there may be one "missing" satoshi, and either party may have
/// chosen to forgo their output as dust.
- fn partially_sign_closing_transaction(&self, closing_tx: &ClosingTransaction,
- secp_ctx: &Secp256k1<secp256k1::All>) -> Result<PartialSignature, ()>;
+ fn partially_sign_closing_transaction(
+ &self, closing_tx: &ClosingTransaction, secp_ctx: &Secp256k1<secp256k1::All>,
+ ) -> Result<PartialSignature, ()>;
/// Computes the signature for a commitment transaction's anchor output used as an
/// input within `anchor_tx`, which spends the commitment transaction, at index `input`.
-use core::ops::Deref;
use crate::sign::{ChannelSigner, SignerProvider};
+use core::ops::Deref;
-pub(crate) enum ChannelSignerType<SP: Deref> where SP::Target: SignerProvider {
+pub(crate) enum ChannelSignerType<SP: Deref>
+where
+ SP::Target: SignerProvider,
+{
// in practice, this will only ever be an EcdsaChannelSigner (specifically, Writeable)
Ecdsa(<SP::Target as SignerProvider>::EcdsaSigner),
#[cfg(taproot)]
Taproot(<SP::Target as SignerProvider>::TaprootSigner),
}
-impl<SP: Deref> ChannelSignerType<SP> where SP::Target: SignerProvider {
+impl<SP: Deref> ChannelSignerType<SP>
+where
+ SP::Target: SignerProvider,
+{
pub(crate) fn as_ref(&self) -> &dyn ChannelSigner {
match self {
ChannelSignerType::Ecdsa(ecs) => ecs,
pub(crate) fn as_ecdsa(&self) -> Option<&<SP::Target as SignerProvider>::EcdsaSigner> {
match self {
ChannelSignerType::Ecdsa(ecs) => Some(ecs),
- _ => None
+ _ => None,
}
}
#[allow(unused)]
- pub(crate) fn as_mut_ecdsa(&mut self) -> Option<&mut <SP::Target as SignerProvider>::EcdsaSigner> {
+ pub(crate) fn as_mut_ecdsa(
+ &mut self,
+ ) -> Option<&mut <SP::Target as SignerProvider>::EcdsaSigner> {
match self {
ChannelSignerType::Ecdsa(ecs) => Some(ecs),
- _ => None
+ _ => None,
}
}
}
pub use std::sync::WaitTimeoutResult;
-use crate::prelude::HashMap;
+use crate::prelude::*;
use super::{LockTestExt, LockHeldState};
thread_local! {
/// We track the set of locks currently held by a reference to their `LockMetadata`
- static LOCKS_HELD: RefCell<HashMap<u64, Arc<LockMetadata>>> = RefCell::new(HashMap::new());
+ static LOCKS_HELD: RefCell<HashMap<u64, Arc<LockMetadata>>> = RefCell::new(new_hash_map());
}
static LOCK_IDX: AtomicUsize = AtomicUsize::new(0);
}
}
}
- let symbol = symbol_after_latest_debug_sync.expect("Couldn't find lock call symbol");
+ let symbol = symbol_after_latest_debug_sync.unwrap_or_else(|| {
+ panic!("Couldn't find lock call symbol in trace {:?}", backtrace);
+ });
(format!("{}:{}", symbol.filename().unwrap().display(), symbol.lineno().unwrap()), symbol.colno())
}
let lock_idx = LOCK_IDX.fetch_add(1, Ordering::Relaxed) as u64;
let res = Arc::new(LockMetadata {
- locked_before: StdMutex::new(HashMap::new()),
+ locked_before: StdMutex::new(new_hash_map()),
lock_idx,
_lock_construction_bt: backtrace,
});
{
let (lock_constr_location, lock_constr_colno) =
locate_call_symbol(&res._lock_construction_bt);
- LOCKS_INIT.call_once(|| { unsafe { LOCKS = Some(StdMutex::new(HashMap::new())); } });
+ LOCKS_INIT.call_once(|| { unsafe { LOCKS = Some(StdMutex::new(new_hash_map())); } });
let mut locks = unsafe { LOCKS.as_ref() }.unwrap().lock().unwrap();
match locks.entry(lock_constr_location) {
hash_map::Entry::Occupied(e) => {
// Apache License, Version 2.0, (LICENSE-APACHE or http://www.apache.org/licenses/LICENSE-2.0) or
// MIT license (LICENSE-MIT or http://opensource.org/licenses/MIT) at your option.
-
+#[allow(unused)]
use crate::prelude::*;
/// RFC4648 encoding table
use crate::ln::channel::MAX_FUNDING_SATOSHIS_NO_WUMBO;
use crate::ln::channelmanager::{BREAKDOWN_TIMEOUT, MAX_LOCAL_BREAKDOWN_TIMEOUT};
+#[cfg(fuzzing)]
+use crate::util::ser::Readable;
+
/// Configuration we set when applicable.
///
/// Default::default() provides sane defaults.
}
}
+// When fuzzing, we want to allow the fuzzer to pick any configuration parameters. Thus, we
+// implement Readable here in a naive way (which is a bit easier for the fuzzer to handle). We
+// don't really want to ever expose this to users (if we did we'd want to use TLVs).
+#[cfg(fuzzing)]
+impl Readable for ChannelHandshakeConfig {
+ fn read<R: crate::io::Read>(reader: &mut R) -> Result<Self, crate::ln::msgs::DecodeError> {
+ Ok(Self {
+ minimum_depth: Readable::read(reader)?,
+ our_to_self_delay: Readable::read(reader)?,
+ our_htlc_minimum_msat: Readable::read(reader)?,
+ max_inbound_htlc_value_in_flight_percent_of_channel: Readable::read(reader)?,
+ negotiate_scid_privacy: Readable::read(reader)?,
+ announced_channel: Readable::read(reader)?,
+ commit_upfront_shutdown_pubkey: Readable::read(reader)?,
+ their_channel_reserve_proportional_millionths: Readable::read(reader)?,
+ negotiate_anchors_zero_fee_htlc_tx: Readable::read(reader)?,
+ our_max_accepted_htlcs: Readable::read(reader)?,
+ })
+ }
+}
+
/// Optional channel limits which are applied during channel creation.
///
/// These limits are only applied to our counterparty's limits, not our own.
}
}
+// When fuzzing, we want to allow the fuzzer to pick any configuration parameters. Thus, we
+// implement Readable here in a naive way (which is a bit easier for the fuzzer to handle). We
+// don't really want to ever expose this to users (if we did we'd want to use TLVs).
+#[cfg(fuzzing)]
+impl Readable for ChannelHandshakeLimits {
+ fn read<R: crate::io::Read>(reader: &mut R) -> Result<Self, crate::ln::msgs::DecodeError> {
+ Ok(Self {
+ min_funding_satoshis: Readable::read(reader)?,
+ max_funding_satoshis: Readable::read(reader)?,
+ max_htlc_minimum_msat: Readable::read(reader)?,
+ min_max_htlc_value_in_flight_msat: Readable::read(reader)?,
+ max_channel_reserve_satoshis: Readable::read(reader)?,
+ min_max_accepted_htlcs: Readable::read(reader)?,
+ trust_own_funding_0conf: Readable::read(reader)?,
+ max_minimum_depth: Readable::read(reader)?,
+ force_announced_channel_preference: Readable::read(reader)?,
+ their_to_self_delay: Readable::read(reader)?,
+ })
+ }
+}
+
/// Options for how to set the max dust HTLC exposure allowed on a channel. See
/// [`ChannelConfig::max_dust_htlc_exposure`] for details.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
}
}
}
+
+// When fuzzing, we want to allow the fuzzer to pick any configuration parameters. Thus, we
+// implement Readable here in a naive way (which is a bit easier for the fuzzer to handle). We
+// don't really want to ever expose this to users (if we did we'd want to use TLVs).
+#[cfg(fuzzing)]
+impl Readable for UserConfig {
+ fn read<R: crate::io::Read>(reader: &mut R) -> Result<Self, crate::ln::msgs::DecodeError> {
+ Ok(Self {
+ channel_handshake_config: Readable::read(reader)?,
+ channel_handshake_limits: Readable::read(reader)?,
+ channel_config: Readable::read(reader)?,
+ accept_forwards_to_priv_channels: Readable::read(reader)?,
+ accept_inbound_channels: Readable::read(reader)?,
+ manually_accept_inbound_channels: Readable::read(reader)?,
+ accept_intercept_htlcs: Readable::read(reader)?,
+ accept_mpp_keysend: Readable::read(reader)?,
+ })
+ }
+}
use crate::ln::script::ShutdownScript;
-use alloc::string::String;
+#[allow(unused_imports)]
+use crate::prelude::*;
+
use core::fmt;
/// Indicates an error on the client's part (usually some variant of attempting to use too-low or
--- /dev/null
+//! Generally LDK uses `std`'s `HashMap`s, however when building for no-std, LDK uses `hashbrown`'s
+//! `HashMap`s with the `std` `SipHasher` and uses `getrandom` to opportunistically randomize it,
+//! if randomization is available.
+//!
+//! This module simply re-exports the `HashMap` used in LDK for public consumption.
+
+#[cfg(feature = "hashbrown")]
+extern crate hashbrown;
+#[cfg(feature = "possiblyrandom")]
+extern crate possiblyrandom;
+
+// For no-std builds, we need to use hashbrown, however, by default, it doesn't randomize the
+// hashing and is vulnerable to HashDoS attacks. Thus, we use the core SipHasher when not using
+// std, but use `getrandom` to randomize it if it's available.
+
+#[cfg(not(feature = "hashbrown"))]
+mod std_hashtables {
+ pub use std::collections::hash_map::RandomState;
+ pub use std::collections::HashMap;
+
+ pub(crate) use std::collections::{hash_map, HashSet};
+
+ pub(crate) type OccupiedHashMapEntry<'a, K, V> =
+ std::collections::hash_map::OccupiedEntry<'a, K, V>;
+ pub(crate) type VacantHashMapEntry<'a, K, V> =
+ std::collections::hash_map::VacantEntry<'a, K, V>;
+
+ /// Builds a new [`HashMap`].
+ pub fn new_hash_map<K, V>() -> HashMap<K, V> {
+ HashMap::new()
+ }
+ /// Builds a new [`HashMap`] with the given capacity.
+ pub fn hash_map_with_capacity<K, V>(cap: usize) -> HashMap<K, V> {
+ HashMap::with_capacity(cap)
+ }
+ pub(crate) fn hash_map_from_iter<
+ K: core::hash::Hash + Eq,
+ V,
+ I: IntoIterator<Item = (K, V)>,
+ >(
+ iter: I,
+ ) -> HashMap<K, V> {
+ HashMap::from_iter(iter)
+ }
+
+ pub(crate) fn new_hash_set<K>() -> HashSet<K> {
+ HashSet::new()
+ }
+ pub(crate) fn hash_set_with_capacity<K>(cap: usize) -> HashSet<K> {
+ HashSet::with_capacity(cap)
+ }
+ pub(crate) fn hash_set_from_iter<K: core::hash::Hash + Eq, I: IntoIterator<Item = K>>(
+ iter: I,
+ ) -> HashSet<K> {
+ HashSet::from_iter(iter)
+ }
+}
+#[cfg(not(feature = "hashbrown"))]
+pub use std_hashtables::*;
+
+#[cfg(feature = "hashbrown")]
+pub(crate) use self::hashbrown::hash_map;
+
+#[cfg(feature = "hashbrown")]
+mod hashbrown_tables {
+ #[cfg(feature = "std")]
+ mod hasher {
+ pub use std::collections::hash_map::RandomState;
+ }
+ #[cfg(not(feature = "std"))]
+ mod hasher {
+ #![allow(deprecated)] // hash::SipHasher was deprecated in favor of something only in std.
+ use core::hash::{BuildHasher, SipHasher};
+
+ #[derive(Clone, Copy)]
+ /// A simple implementation of [`BuildHasher`] that uses `getrandom` to opportunistically
+ /// randomize, if the platform supports it.
+ pub struct RandomState {
+ k0: u64,
+ k1: u64,
+ }
+
+ impl RandomState {
+ /// Constructs a new [`RandomState`] which may or may not be random, depending on the
+ /// target platform.
+ pub fn new() -> RandomState {
+ let (k0, k1);
+ #[cfg(all(not(fuzzing), feature = "possiblyrandom"))]
+ {
+ let mut keys = [0; 16];
+ possiblyrandom::getpossiblyrandom(&mut keys);
+
+ let mut k0_bytes = [0; 8];
+ let mut k1_bytes = [0; 8];
+ k0_bytes.copy_from_slice(&keys[..8]);
+ k1_bytes.copy_from_slice(&keys[8..]);
+ k0 = u64::from_le_bytes(k0_bytes);
+ k1 = u64::from_le_bytes(k1_bytes);
+ }
+ #[cfg(any(fuzzing, not(feature = "possiblyrandom")))]
+ {
+ k0 = 0;
+ k1 = 0;
+ }
+ RandomState { k0, k1 }
+ }
+ }
+
+ impl Default for RandomState {
+ fn default() -> RandomState {
+ RandomState::new()
+ }
+ }
+
+ impl BuildHasher for RandomState {
+ type Hasher = SipHasher;
+ fn build_hasher(&self) -> SipHasher {
+ SipHasher::new_with_keys(self.k0, self.k1)
+ }
+ }
+ }
+
+ use super::*;
+ pub use hasher::*;
+
+ /// The HashMap type used in LDK.
+ pub type HashMap<K, V> = hashbrown::HashMap<K, V, RandomState>;
+ pub(crate) type HashSet<K> = hashbrown::HashSet<K, RandomState>;
+
+ pub(crate) type OccupiedHashMapEntry<'a, K, V> =
+ hashbrown::hash_map::OccupiedEntry<'a, K, V, RandomState>;
+ pub(crate) type VacantHashMapEntry<'a, K, V> =
+ hashbrown::hash_map::VacantEntry<'a, K, V, RandomState>;
+
+ /// Builds a new [`HashMap`].
+ pub fn new_hash_map<K, V>() -> HashMap<K, V> {
+ HashMap::with_hasher(RandomState::new())
+ }
+ /// Builds a new [`HashMap`] with the given capacity.
+ pub fn hash_map_with_capacity<K, V>(cap: usize) -> HashMap<K, V> {
+ HashMap::with_capacity_and_hasher(cap, RandomState::new())
+ }
+ pub(crate) fn hash_map_from_iter<
+ K: core::hash::Hash + Eq,
+ V,
+ I: IntoIterator<Item = (K, V)>,
+ >(
+ iter: I,
+ ) -> HashMap<K, V> {
+ let iter = iter.into_iter();
+ let min_size = iter.size_hint().0;
+ let mut res = HashMap::with_capacity_and_hasher(min_size, RandomState::new());
+ res.extend(iter);
+ res
+ }
+
+ pub(crate) fn new_hash_set<K>() -> HashSet<K> {
+ HashSet::with_hasher(RandomState::new())
+ }
+ pub(crate) fn hash_set_with_capacity<K>(cap: usize) -> HashSet<K> {
+ HashSet::with_capacity_and_hasher(cap, RandomState::new())
+ }
+ pub(crate) fn hash_set_from_iter<K: core::hash::Hash + Eq, I: IntoIterator<Item = K>>(
+ iter: I,
+ ) -> HashSet<K> {
+ let iter = iter.into_iter();
+ let min_size = iter.size_hint().0;
+ let mut res = HashSet::with_capacity_and_hasher(min_size, RandomState::new());
+ res.extend(iter);
+ res
+ }
+}
+#[cfg(feature = "hashbrown")]
+pub use hashbrown_tables::*;
//! This module has a map which can be iterated in a deterministic order. See the [`IndexedMap`].
-use crate::prelude::{HashMap, hash_map};
-use alloc::vec::Vec;
+use crate::prelude::*;
use alloc::slice::Iter;
use core::hash::Hash;
-use core::cmp::Ord;
use core::ops::{Bound, RangeBounds};
/// A map which can be iterated in a deterministic order.
/// Constructs a new, empty map
pub fn new() -> Self {
Self {
- map: HashMap::new(),
+ map: new_hash_map(),
keys: Vec::new(),
}
}
/// Constructs a new, empty map with the given capacity pre-allocated
pub fn with_capacity(capacity: usize) -> Self {
Self {
- map: HashMap::with_capacity(capacity),
+ map: hash_map_with_capacity(capacity),
keys: Vec::with_capacity(capacity),
}
}
self.map.get_mut(key)
}
+ /// Fetches the key-value pair corresponding to the supplied key, if one exists.
+ pub fn get_key_value(&self, key: &K) -> Option<(&K, &V)> {
+ self.map.get_key_value(key)
+ }
+
#[inline]
/// Returns true if an element with the given `key` exists in the map.
pub fn contains_key(&self, key: &K) -> bool {
///
/// This is not exported to bindings users as bindings provide alternate accessors rather than exposing maps directly.
pub struct VacantEntry<'a, K: Hash + Ord, V> {
- #[cfg(feature = "hashbrown")]
- underlying_entry: hash_map::VacantEntry<'a, K, V, hash_map::DefaultHashBuilder>,
- #[cfg(not(feature = "hashbrown"))]
- underlying_entry: hash_map::VacantEntry<'a, K, V>,
+ underlying_entry: VacantHashMapEntry<'a, K, V>,
key: K,
keys: &'a mut Vec<K>,
}
///
/// This is not exported to bindings users as bindings provide alternate accessors rather than exposing maps directly.
pub struct OccupiedEntry<'a, K: Hash + Ord, V> {
- #[cfg(feature = "hashbrown")]
- underlying_entry: hash_map::OccupiedEntry<'a, K, V, hash_map::DefaultHashBuilder>,
- #[cfg(not(feature = "hashbrown"))]
- underlying_entry: hash_map::OccupiedEntry<'a, K, V>,
+ underlying_entry: OccupiedHashMapEntry<'a, K, V>,
keys: &'a mut Vec<K>,
}
//! Low level invoice utilities.
use bitcoin::bech32::{u5, FromBase32};
+
+#[allow(unused)]
use crate::prelude::*;
/// Construct the invoice's HRP and signatureless data into a preimage to be hashed.
}
}
+macro_rules! impl_record {
+ ($($args: lifetime)?, $($nonstruct_args: lifetime)?) => {
/// A Record, unit of logging output with Metadata to enable filtering
/// Module_path, file, line to inform on log's source
#[derive(Clone, Debug)]
-pub struct Record<'a> {
+pub struct Record<$($args)?> {
/// The verbosity level of the message.
pub level: Level,
/// The node id of the peer pertaining to the logged record.
pub file: &'static str,
/// The line containing the message.
pub line: u32,
-
- #[cfg(c_bindings)]
- /// We don't actually use the lifetime parameter in C bindings (as there is no good way to
- /// communicate a lifetime to a C, or worse, Java user).
- _phantom: core::marker::PhantomData<&'a ()>,
}
-impl<'a> Record<'a> {
+impl<$($args)?> Record<$($args)?> {
/// Returns a new Record.
///
/// This is not exported to bindings users as fmt can't be used in C
#[inline]
- pub fn new(
+ pub fn new<$($nonstruct_args)?>(
level: Level, peer_id: Option<PublicKey>, channel_id: Option<ChannelId>,
args: fmt::Arguments<'a>, module_path: &'static str, file: &'static str, line: u32
- ) -> Record<'a> {
+ ) -> Record<$($args)?> {
Record {
level,
peer_id,
module_path,
file,
line,
- #[cfg(c_bindings)]
- _phantom: core::marker::PhantomData,
}
}
}
+} }
+#[cfg(not(c_bindings))]
+impl_record!('a, );
+#[cfg(c_bindings)]
+impl_record!(, 'a);
/// A trait encapsulating the operations required of a logger.
pub trait Logger {
}
/// Adds relevant context to a [`Record`] before passing it to the wrapped [`Logger`].
+///
+/// This is not exported to bindings users as lifetimes are problematic and there's little reason
+/// for this to be used downstream anyway.
pub struct WithContext<'a, L: Deref> where L::Target: Logger {
/// The logger to delegate to after adding context to the record.
logger: &'a L,
// You may not use this file except in accordance with one or both of these
// licenses.
-use crate::chain::transaction::OutPoint;
+use crate::ln::ChannelId;
use crate::sign::SpendableOutputDescriptor;
-use bitcoin::hash_types::Txid;
use bitcoin::blockdata::transaction::Transaction;
use crate::routing::router::Route;
}
}
-pub(crate) struct DebugFundingChannelId<'a>(pub &'a Txid, pub u16);
-impl<'a> core::fmt::Display for DebugFundingChannelId<'a> {
+pub(crate) struct DebugFundingInfo<'a>(pub &'a ChannelId);
+impl<'a> core::fmt::Display for DebugFundingInfo<'a> {
fn fmt(&self, f: &mut core::fmt::Formatter) -> Result<(), core::fmt::Error> {
- (OutPoint { txid: self.0.clone(), index: self.1 }).to_channel_id().fmt(f)
- }
-}
-macro_rules! log_funding_channel_id {
- ($funding_txid: expr, $funding_txo: expr) => {
- $crate::util::macro_logger::DebugFundingChannelId(&$funding_txid, $funding_txo)
- }
-}
-
-pub(crate) struct DebugFundingInfo<'a, T: 'a>(pub &'a (OutPoint, T));
-impl<'a, T> core::fmt::Display for DebugFundingInfo<'a, T> {
- fn fmt(&self, f: &mut core::fmt::Formatter) -> Result<(), core::fmt::Error> {
- (self.0).0.to_channel_id().fmt(f)
+ self.0.fmt(f)
}
}
macro_rules! log_funding_info {
($key_storage: expr) => {
- $crate::util::macro_logger::DebugFundingInfo(&$key_storage.get_funding_txo())
+ $crate::util::macro_logger::DebugFundingInfo(
+ &$key_storage.channel_id()
+ )
}
}
//! <https://lightning.readthedocs.io/lightning-signmessage.7.html>
//! <https://api.lightning.community/#signmessage>
+#[allow(unused)]
use crate::prelude::*;
use crate::util::base32;
use bitcoin::hashes::{sha256d, Hash};
pub mod message_signing;
pub mod invoice;
pub mod persist;
+pub mod scid_utils;
pub mod string;
+pub mod sweep;
pub mod wakers;
#[cfg(fuzzing)]
pub mod base32;
pub(crate) mod atomic_counter;
pub(crate) mod byte_utils;
pub(crate) mod transaction_utils;
-pub(crate) mod scid_utils;
pub(crate) mod time;
+pub mod hash_tables;
pub mod indexed_map;
//! This module contains a simple key-value store trait [`KVStore`] that
//! allows one to implement the persistence for [`ChannelManager`], [`NetworkGraph`],
//! and [`ChannelMonitor`] all in one place.
+//!
+//! [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
use core::cmp;
-use core::convert::{TryFrom, TryInto};
use core::ops::Deref;
use core::str::FromStr;
use bitcoin::{BlockHash, Txid};
use crate::{io, log_error};
-use crate::alloc::string::ToString;
use crate::prelude::*;
use crate::chain;
use crate::chain::chaininterface::{BroadcasterInterface, FeeEstimator};
use crate::chain::chainmonitor::{Persist, MonitorUpdateId};
-use crate::sign::{EntropySource, NodeSigner, ecdsa::WriteableEcdsaChannelSigner, SignerProvider};
+use crate::sign::{EntropySource, ecdsa::WriteableEcdsaChannelSigner, SignerProvider};
use crate::chain::transaction::OutPoint;
use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, CLOSED_CHANNEL_UPDATE_ID};
-use crate::ln::channelmanager::ChannelManager;
-use crate::routing::router::Router;
+use crate::ln::channelmanager::AChannelManager;
use crate::routing::gossip::NetworkGraph;
use crate::routing::scoring::WriteableScore;
use crate::util::logger::Logger;
pub const KVSTORE_NAMESPACE_KEY_MAX_LEN: usize = 120;
/// The primary namespace under which the [`ChannelManager`] will be persisted.
+///
+/// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
pub const CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE: &str = "";
/// The secondary namespace under which the [`ChannelManager`] will be persisted.
+///
+/// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
pub const CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE: &str = "";
/// The key under which the [`ChannelManager`] will be persisted.
+///
+/// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
pub const CHANNEL_MANAGER_PERSISTENCE_KEY: &str = "manager";
/// The primary namespace under which [`ChannelMonitor`]s will be persisted.
/// The primary namespace under which [`ChannelMonitorUpdate`]s will be persisted.
pub const CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE: &str = "monitor_updates";
+/// The primary namespace under which archived [`ChannelMonitor`]s will be persisted.
+pub const ARCHIVED_CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE: &str = "archived_monitors";
+/// The secondary namespace under which archived [`ChannelMonitor`]s will be persisted.
+pub const ARCHIVED_CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE: &str = "";
+
/// The primary namespace under which the [`NetworkGraph`] will be persisted.
pub const NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE: &str = "";
/// The secondary namespace under which the [`NetworkGraph`] will be persisted.
/// The key under which the [`WriteableScore`] will be persisted.
pub const SCORER_PERSISTENCE_KEY: &str = "scorer";
+/// The primary namespace under which [`OutputSweeper`] state will be persisted.
+///
+/// [`OutputSweeper`]: crate::util::sweep::OutputSweeper
+pub const OUTPUT_SWEEPER_PERSISTENCE_PRIMARY_NAMESPACE: &str = "";
+/// The secondary namespace under which [`OutputSweeper`] state will be persisted.
+///
+/// [`OutputSweeper`]: crate::util::sweep::OutputSweeper
+pub const OUTPUT_SWEEPER_PERSISTENCE_SECONDARY_NAMESPACE: &str = "";
+/// The key under which [`OutputSweeper`] state will be persisted.
+///
+/// [`OutputSweeper`]: crate::util::sweep::OutputSweeper
+pub const OUTPUT_SWEEPER_PERSISTENCE_KEY: &str = "output_sweeper";
+
/// A sentinel value to be prepended to monitors persisted by the [`MonitorUpdatingPersister`].
///
/// This serves to prevent someone from accidentally loading such monitors (which may need
}
/// Trait that handles persisting a [`ChannelManager`], [`NetworkGraph`], and [`WriteableScore`] to disk.
-pub trait Persister<'a, M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref, S: WriteableScore<'a>>
- where M::Target: 'static + chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
- T::Target: 'static + BroadcasterInterface,
- ES::Target: 'static + EntropySource,
- NS::Target: 'static + NodeSigner,
- SP::Target: 'static + SignerProvider,
- F::Target: 'static + FeeEstimator,
- R::Target: 'static + Router,
- L::Target: 'static + Logger,
+///
+/// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
+pub trait Persister<'a, CM: Deref, L: Deref, S: WriteableScore<'a>>
+where
+ CM::Target: 'static + AChannelManager,
+ L::Target: 'static + Logger,
{
	/// Persist the given [`ChannelManager`] to disk, returning an error if persistence failed.
- fn persist_manager(&self, channel_manager: &ChannelManager<M, T, ES, NS, SP, F, R, L>) -> Result<(), io::Error>;
+ ///
+ /// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
+ fn persist_manager(&self, channel_manager: &CM) -> Result<(), io::Error>;
/// Persist the given [`NetworkGraph`] to disk, returning an error if persistence failed.
fn persist_graph(&self, network_graph: &NetworkGraph<L>) -> Result<(), io::Error>;
}
-impl<'a, A: KVStore, M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref, S: WriteableScore<'a>> Persister<'a, M, T, ES, NS, SP, F, R, L, S> for A
- where M::Target: 'static + chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
- T::Target: 'static + BroadcasterInterface,
- ES::Target: 'static + EntropySource,
- NS::Target: 'static + NodeSigner,
- SP::Target: 'static + SignerProvider,
- F::Target: 'static + FeeEstimator,
- R::Target: 'static + Router,
- L::Target: 'static + Logger,
+impl<'a, A: KVStore + ?Sized, CM: Deref, L: Deref, S: WriteableScore<'a>> Persister<'a, CM, L, S> for A
+where
+ CM::Target: 'static + AChannelManager,
+ L::Target: 'static + Logger,
{
- /// Persist the given [`ChannelManager`] to disk, returning an error if persistence failed.
- fn persist_manager(&self, channel_manager: &ChannelManager<M, T, ES, NS, SP, F, R, L>) -> Result<(), io::Error> {
+ fn persist_manager(&self, channel_manager: &CM) -> Result<(), io::Error> {
self.write(CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE,
CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE,
CHANNEL_MANAGER_PERSISTENCE_KEY,
- &channel_manager.encode())
+ &channel_manager.get_cm().encode())
}
- /// Persist the given [`NetworkGraph`] to disk, returning an error if persistence failed.
fn persist_graph(&self, network_graph: &NetworkGraph<L>) -> Result<(), io::Error> {
self.write(NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE,
NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE,
&network_graph.encode())
}
- /// Persist the given [`WriteableScore`] to disk, returning an error if persistence failed.
fn persist_scorer(&self, scorer: &S) -> Result<(), io::Error> {
self.write(SCORER_PERSISTENCE_PRIMARY_NAMESPACE,
SCORER_PERSISTENCE_SECONDARY_NAMESPACE,
}
}
-impl<ChannelSigner: WriteableEcdsaChannelSigner, K: KVStore> Persist<ChannelSigner> for K {
+impl<ChannelSigner: WriteableEcdsaChannelSigner, K: KVStore + ?Sized> Persist<ChannelSigner> for K {
// TODO: We really need a way for the persister to inform the user that its time to crash/shut
// down once these start returning failure.
// Then we should return InProgress rather than UnrecoverableError, implying we should probably
Err(_) => chain::ChannelMonitorUpdateStatus::UnrecoverableError
}
}
+
+ fn archive_persisted_channel(&self, funding_txo: OutPoint) {
+ let monitor_name = MonitorName::from(funding_txo);
+ let monitor = match self.read(
+ CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE,
+ CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE,
+ monitor_name.as_str(),
+ ) {
+ Ok(monitor) => monitor,
+ Err(_) => return
+ };
+ match self.write(
+ ARCHIVED_CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE,
+ ARCHIVED_CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE,
+ monitor_name.as_str(),
+ &monitor,
+ ) {
+ Ok(()) => {}
+ Err(_e) => return
+ };
+ let _ = self.remove(
+ CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE,
+ CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE,
+ monitor_name.as_str(),
+ true,
+ );
+ }
}
/// Read previously persisted [`ChannelMonitor`]s from the store.
self.persist_new_channel(funding_txo, monitor, monitor_update_call_id)
}
}
+
+ fn archive_persisted_channel(&self, funding_txo: OutPoint) {
+ let monitor_name = MonitorName::from(funding_txo);
+ let monitor = match self.read_monitor(&monitor_name) {
+ Ok((_block_hash, monitor)) => monitor,
+ Err(_) => return
+ };
+ match self.kv_store.write(
+ ARCHIVED_CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE,
+ ARCHIVED_CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE,
+ monitor_name.as_str(),
+ &monitor.encode()
+ ) {
+ Ok(()) => {},
+ Err(_e) => return,
+ };
+ let _ = self.kv_store.remove(
+ CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE,
+ CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE,
+ monitor_name.as_str(),
+ true,
+ );
+ }
}
impl<K: Deref, L: Deref, ES: Deref, SP: Deref> MonitorUpdatingPersister<K, L, ES, SP>
#[cfg(test)]
mod tests {
use super::*;
- use crate::chain::chainmonitor::Persist;
use crate::chain::ChannelMonitorUpdateStatus;
use crate::events::{ClosureReason, MessageSendEventsProvider};
use crate::ln::functional_test_utils::*;
use crate::util::test_utils::{self, TestLogger, TestStore};
use crate::{check_added_monitors, check_closed_broadcast};
+ use crate::sync::Arc;
+ use crate::util::test_channel_signer::TestChannelSigner;
const EXPECTED_UPDATES_PER_PAYMENT: u64 = 5;
{
let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
let update_map = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap();
- let update_id = update_map.get(&added_monitors[0].0.to_channel_id()).unwrap();
+ let update_id = update_map.get(&added_monitors[0].1.channel_id()).unwrap();
let cmu_map = nodes[1].chain_monitor.monitor_updates.lock().unwrap();
- let cmu = &cmu_map.get(&added_monitors[0].0.to_channel_id()).unwrap()[0];
+ let cmu = &cmu_map.get(&added_monitors[0].1.channel_id()).unwrap()[0];
let test_txo = OutPoint { txid: Txid::from_str("8984484a580b825b9972d7adb15050b3ab624ccd731946b3eeddb92f4e7ef6be").unwrap(), index: 0 };
let ro_persister = MonitorUpdatingPersister {
.read(CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE, monitor_name.as_str(), UpdateName::from(u64::MAX - 1).as_str())
.is_err());
}
+
+ fn persist_fn<P: Deref, ChannelSigner: WriteableEcdsaChannelSigner>(_persist: P) -> bool where P::Target: Persist<ChannelSigner> {
+ true
+ }
+
+ #[test]
+ fn kvstore_trait_object_usage() {
+ let store: Arc<dyn KVStore + Send + Sync> = Arc::new(TestStore::new(false));
+ assert!(persist_fn::<_, TestChannelSigner>(store.clone()));
+ }
}
// You may not use this file except in accordance with one or both of these
// licenses.
+//! Utilities for creating and parsing short channel ids.
+
/// Maximum block height that can be used in a `short_channel_id`. This
/// value is based on the 3-bytes available for block height.
pub const MAX_SCID_BLOCK: u64 = 0x00ffffff;
/// A `short_channel_id` construction error
#[derive(Debug, PartialEq, Eq)]
pub enum ShortChannelIdError {
+ /// Block height too high
BlockOverflow,
+ /// Tx index too high
TxIndexOverflow,
+ /// Vout index too high
VoutIndexOverflow,
}
/// Extracts the block height (most significant 3-bytes) from the `short_channel_id`
-pub fn block_from_scid(short_channel_id: &u64) -> u32 {
+pub fn block_from_scid(short_channel_id: u64) -> u32 {
return (short_channel_id >> 40) as u32;
}
/// Extracts the tx index (bytes [2..4]) from the `short_channel_id`
-pub fn tx_index_from_scid(short_channel_id: &u64) -> u32 {
+pub fn tx_index_from_scid(short_channel_id: u64) -> u32 {
return ((short_channel_id >> 16) & MAX_SCID_TX_INDEX) as u32;
}
/// Extracts the vout (bytes [0..2]) from the `short_channel_id`
-pub fn vout_from_scid(short_channel_id: &u64) -> u16 {
+pub fn vout_from_scid(short_channel_id: u64) -> u16 {
return ((short_channel_id) & MAX_SCID_VOUT_INDEX) as u16;
}
use crate::sign::EntropySource;
use crate::crypto::chacha20::ChaCha20;
use crate::util::scid_utils;
+ use crate::prelude::*;
- use core::convert::TryInto;
use core::ops::Deref;
const TEST_SEGWIT_ACTIVATION_HEIGHT: u32 = 1;
/// into the fake scid.
#[derive(Copy, Clone)]
pub(crate) enum Namespace {
+ /// Phantom nodes namespace
Phantom,
+ /// SCID aliases for outbound private channels
OutboundAlias,
+ /// Payment interception namespace
Intercept
}
/// Returns whether the given fake scid falls into the phantom namespace.
pub fn is_valid_phantom(fake_scid_rand_bytes: &[u8; 32], scid: u64, chain_hash: &ChainHash) -> bool {
- let block_height = scid_utils::block_from_scid(&scid);
- let tx_index = scid_utils::tx_index_from_scid(&scid);
+ let block_height = scid_utils::block_from_scid(scid);
+ let tx_index = scid_utils::tx_index_from_scid(scid);
let namespace = Namespace::Phantom;
let valid_vout = namespace.get_encrypted_vout(block_height, tx_index, fake_scid_rand_bytes);
block_height >= segwit_activation_height(chain_hash)
- && valid_vout == scid_utils::vout_from_scid(&scid) as u8
+ && valid_vout == scid_utils::vout_from_scid(scid) as u8
}
/// Returns whether the given fake scid falls into the intercept namespace.
pub fn is_valid_intercept(fake_scid_rand_bytes: &[u8; 32], scid: u64, chain_hash: &ChainHash) -> bool {
- let block_height = scid_utils::block_from_scid(&scid);
- let tx_index = scid_utils::tx_index_from_scid(&scid);
+ let block_height = scid_utils::block_from_scid(scid);
+ let tx_index = scid_utils::tx_index_from_scid(scid);
let namespace = Namespace::Intercept;
let valid_vout = namespace.get_encrypted_vout(block_height, tx_index, fake_scid_rand_bytes);
block_height >= segwit_activation_height(chain_hash)
- && valid_vout == scid_utils::vout_from_scid(&scid) as u8
+ && valid_vout == scid_utils::vout_from_scid(scid) as u8
}
#[cfg(test)]
let namespace = Namespace::Phantom;
let fake_scid = namespace.get_fake_scid(500_000, &mainnet_genesis, &fake_scid_rand_bytes, &keys_manager);
- let fake_height = scid_utils::block_from_scid(&fake_scid);
+ let fake_height = scid_utils::block_from_scid(fake_scid);
assert!(fake_height >= MAINNET_SEGWIT_ACTIVATION_HEIGHT);
assert!(fake_height <= 500_000);
- let fake_tx_index = scid_utils::tx_index_from_scid(&fake_scid);
+ let fake_tx_index = scid_utils::tx_index_from_scid(fake_scid);
assert!(fake_tx_index <= MAX_TX_INDEX);
- let fake_vout = scid_utils::vout_from_scid(&fake_scid);
+ let fake_vout = scid_utils::vout_from_scid(fake_scid);
assert!(fake_vout < MAX_NAMESPACES as u16);
}
}
#[test]
fn test_block_from_scid() {
- assert_eq!(block_from_scid(&0x000000_000000_0000), 0);
- assert_eq!(block_from_scid(&0x000001_000000_0000), 1);
- assert_eq!(block_from_scid(&0x000001_ffffff_ffff), 1);
- assert_eq!(block_from_scid(&0x800000_ffffff_ffff), 0x800000);
- assert_eq!(block_from_scid(&0xffffff_ffffff_ffff), 0xffffff);
+ assert_eq!(block_from_scid(0x000000_000000_0000), 0);
+ assert_eq!(block_from_scid(0x000001_000000_0000), 1);
+ assert_eq!(block_from_scid(0x000001_ffffff_ffff), 1);
+ assert_eq!(block_from_scid(0x800000_ffffff_ffff), 0x800000);
+ assert_eq!(block_from_scid(0xffffff_ffffff_ffff), 0xffffff);
}
#[test]
fn test_tx_index_from_scid() {
- assert_eq!(tx_index_from_scid(&0x000000_000000_0000), 0);
- assert_eq!(tx_index_from_scid(&0x000000_000001_0000), 1);
- assert_eq!(tx_index_from_scid(&0xffffff_000001_ffff), 1);
- assert_eq!(tx_index_from_scid(&0xffffff_800000_ffff), 0x800000);
- assert_eq!(tx_index_from_scid(&0xffffff_ffffff_ffff), 0xffffff);
+ assert_eq!(tx_index_from_scid(0x000000_000000_0000), 0);
+ assert_eq!(tx_index_from_scid(0x000000_000001_0000), 1);
+ assert_eq!(tx_index_from_scid(0xffffff_000001_ffff), 1);
+ assert_eq!(tx_index_from_scid(0xffffff_800000_ffff), 0x800000);
+ assert_eq!(tx_index_from_scid(0xffffff_ffffff_ffff), 0xffffff);
}
#[test]
fn test_vout_from_scid() {
- assert_eq!(vout_from_scid(&0x000000_000000_0000), 0);
- assert_eq!(vout_from_scid(&0x000000_000000_0001), 1);
- assert_eq!(vout_from_scid(&0xffffff_ffffff_0001), 1);
- assert_eq!(vout_from_scid(&0xffffff_ffffff_8000), 0x8000);
- assert_eq!(vout_from_scid(&0xffffff_ffffff_ffff), 0xffff);
+ assert_eq!(vout_from_scid(0x000000_000000_0000), 0);
+ assert_eq!(vout_from_scid(0x000000_000000_0001), 1);
+ assert_eq!(vout_from_scid(0xffffff_ffffff_0001), 1);
+ assert_eq!(vout_from_scid(0xffffff_ffffff_8000), 0x8000);
+ assert_eq!(vout_from_scid(0xffffff_ffffff_ffff), 0xffff);
}
#[test]
use core::hash::Hash;
use crate::sync::{Mutex, RwLock};
use core::cmp;
-use core::convert::TryFrom;
use core::ops::Deref;
use alloc::collections::BTreeMap;
use bitcoin::consensus::Encodable;
use bitcoin::hashes::sha256d::Hash as Sha256dHash;
use bitcoin::hash_types::{Txid, BlockHash};
-use core::marker::Sized;
use core::time::Duration;
use crate::chain::ClaimId;
use crate::ln::msgs::DecodeError;
/// forward to ensure we always consume exactly the fixed length specified.
///
/// This is not exported to bindings users as manual TLV building is not currently supported in bindings
-pub struct FixedLengthReader<R: Read> {
- read: R,
+pub struct FixedLengthReader<'a, R: Read> {
+ read: &'a mut R,
bytes_read: u64,
total_bytes: u64,
}
-impl<R: Read> FixedLengthReader<R> {
+impl<'a, R: Read> FixedLengthReader<'a, R> {
/// Returns a new [`FixedLengthReader`].
- pub fn new(read: R, total_bytes: u64) -> Self {
+ pub fn new(read: &'a mut R, total_bytes: u64) -> Self {
Self { read, bytes_read: 0, total_bytes }
}
}
}
}
-impl<R: Read> Read for FixedLengthReader<R> {
+impl<'a, R: Read> Read for FixedLengthReader<'a, R> {
#[inline]
fn read(&mut self, dest: &mut [u8]) -> Result<usize, io::Error> {
if self.total_bytes == self.bytes_read {
}
}
-impl<R: Read> LengthRead for FixedLengthReader<R> {
+impl<'a, R: Read> LengthRead for FixedLengthReader<'a, R> {
#[inline]
fn total_bytes(&self) -> u64 {
self.total_bytes
}
impl_for_map!(BTreeMap, Ord, |_| BTreeMap::new());
-impl_for_map!(HashMap, Hash, |len| HashMap::with_capacity(len));
+impl_for_map!(HashMap, Hash, |len| hash_map_with_capacity(len));
// HashSet
impl<T> Writeable for HashSet<T>
#[inline]
fn read<R: Read>(r: &mut R) -> Result<Self, DecodeError> {
let len: CollectionLength = Readable::read(r)?;
- let mut ret = HashSet::with_capacity(cmp::min(len.0 as usize, MAX_BUF_SIZE / core::mem::size_of::<T>()));
+ let mut ret = hash_set_with_capacity(cmp::min(len.0 as usize, MAX_BUF_SIZE / core::mem::size_of::<T>()));
for _ in 0..len.0 {
if !ret.insert(T::read(r)?) {
return Err(DecodeError::InvalidValue)
}
}
+// Alternatives to impl_writeable_for_vec/impl_readable_for_vec that add a length prefix to each
+// element in the Vec. Intended to be used when elements have variable lengths.
+macro_rules! impl_writeable_for_vec_with_element_length_prefix {
+ ($ty: ty $(, $name: ident)*) => {
+ impl<$($name : Writeable),*> Writeable for Vec<$ty> {
+ #[inline]
+ fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
+ CollectionLength(self.len() as u64).write(w)?;
+ for elem in self.iter() {
+ CollectionLength(elem.serialized_length() as u64).write(w)?;
+ elem.write(w)?;
+ }
+ Ok(())
+ }
+ }
+ }
+}
+macro_rules! impl_readable_for_vec_with_element_length_prefix {
+ ($ty: ty $(, $name: ident)*) => {
+ impl<$($name : Readable),*> Readable for Vec<$ty> {
+ #[inline]
+ fn read<R: Read>(r: &mut R) -> Result<Self, DecodeError> {
+ let len: CollectionLength = Readable::read(r)?;
+ let mut ret = Vec::with_capacity(cmp::min(len.0 as usize, MAX_BUF_SIZE / core::mem::size_of::<$ty>()));
+ for _ in 0..len.0 {
+ let elem_len: CollectionLength = Readable::read(r)?;
+ let mut elem_reader = FixedLengthReader::new(r, elem_len.0);
+ if let Some(val) = MaybeReadable::read(&mut elem_reader)? {
+ ret.push(val);
+ }
+ }
+ Ok(ret)
+ }
+ }
+ }
+}
+macro_rules! impl_for_vec_with_element_length_prefix {
+ ($ty: ty $(, $name: ident)*) => {
+ impl_writeable_for_vec_with_element_length_prefix!($ty $(, $name)*);
+ impl_readable_for_vec_with_element_length_prefix!($ty $(, $name)*);
+ }
+}
+
impl Writeable for Vec<u8> {
#[inline]
fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
impl_for_vec!(ecdsa::Signature);
impl_for_vec!(crate::chain::channelmonitor::ChannelMonitorUpdate);
impl_for_vec!(crate::ln::channelmanager::MonitorUpdateCompletionAction);
+impl_for_vec!(crate::ln::msgs::SocketAddress);
impl_for_vec!((A, B), A, B);
impl_writeable_for_vec!(&crate::routing::router::BlindedTail);
impl_readable_for_vec!(crate::routing::router::BlindedTail);
+impl_for_vec_with_element_length_prefix!(crate::ln::msgs::UpdateAddHTLC);
+impl_writeable_for_vec_with_element_length_prefix!(&crate::ln::msgs::UpdateAddHTLC);
impl Writeable for Vec<Witness> {
#[inline]
pub fn into_transaction(self) -> Transaction {
self.0
}
+
+ /// Returns a reference to the contained `Transaction`
+ pub fn as_transaction(&self) -> &Transaction {
+ &self.0
+ }
}
impl Writeable for TransactionU16LenLimited {
#[cfg(test)]
mod tests {
- use core::convert::TryFrom;
use bitcoin::hashes::hex::FromHex;
use bitcoin::secp256k1::ecdsa;
use crate::util::ser::{Readable, Hostname, Writeable};
+ use crate::prelude::*;
#[test]
fn hostname_conversion() {
#[doc(hidden)]
#[macro_export]
macro_rules! _decode_tlv {
- ($reader: expr, $field: ident, (default_value, $default: expr)) => {{
- $crate::_decode_tlv!($reader, $field, required)
+ ($outer_reader: expr, $reader: expr, $field: ident, (default_value, $default: expr)) => {{
+ $crate::_decode_tlv!($outer_reader, $reader, $field, required)
}};
- ($reader: expr, $field: ident, (static_value, $value: expr)) => {{
+ ($outer_reader: expr, $reader: expr, $field: ident, (static_value, $value: expr)) => {{
}};
- ($reader: expr, $field: ident, required) => {{
+ ($outer_reader: expr, $reader: expr, $field: ident, required) => {{
$field = $crate::util::ser::Readable::read(&mut $reader)?;
}};
- ($reader: expr, $field: ident, (required: $trait: ident $(, $read_arg: expr)?)) => {{
+ ($outer_reader: expr, $reader: expr, $field: ident, (required: $trait: ident $(, $read_arg: expr)?)) => {{
$field = $trait::read(&mut $reader $(, $read_arg)*)?;
}};
- ($reader: expr, $field: ident, required_vec) => {{
+ ($outer_reader: expr, $reader: expr, $field: ident, required_vec) => {{
let f: $crate::util::ser::WithoutLength<Vec<_>> = $crate::util::ser::Readable::read(&mut $reader)?;
$field = f.0;
}};
- ($reader: expr, $field: ident, option) => {{
+ ($outer_reader: expr, $reader: expr, $field: ident, option) => {{
$field = Some($crate::util::ser::Readable::read(&mut $reader)?);
}};
- ($reader: expr, $field: ident, optional_vec) => {{
+ ($outer_reader: expr, $reader: expr, $field: ident, optional_vec) => {{
let f: $crate::util::ser::WithoutLength<Vec<_>> = $crate::util::ser::Readable::read(&mut $reader)?;
$field = Some(f.0);
}};
// without backwards compat. We'll error if the field is missing, and return `Ok(None)` if the
// field is present but we can no longer understand it.
// Note that this variant can only be used within a `MaybeReadable` read.
- ($reader: expr, $field: ident, upgradable_required) => {{
+ ($outer_reader: expr, $reader: expr, $field: ident, upgradable_required) => {{
$field = match $crate::util::ser::MaybeReadable::read(&mut $reader)? {
Some(res) => res,
- _ => return Ok(None)
+ None => {
+ // If we successfully read a value but we don't know how to parse it, we give up
+ // and immediately return `None`. However, we need to make sure we read the correct
+ // number of bytes for this TLV stream, which is implicitly the end of the stream.
+ // Thus, we consume everything left in the `$outer_reader` here, ensuring that if
+ // we're being read as a part of another TLV stream we don't spuriously fail to
+ // deserialize the outer object due to a TLV length mismatch.
+ $crate::io_extras::copy($outer_reader, &mut $crate::io_extras::sink()).unwrap();
+ return Ok(None)
+ },
};
}};
// `upgradable_option` indicates we're reading an Option-al TLV that may have been upgraded
// without backwards compat. $field will be None if the TLV is missing or if the field is present
// but we can no longer understand it.
- ($reader: expr, $field: ident, upgradable_option) => {{
+ ($outer_reader: expr, $reader: expr, $field: ident, upgradable_option) => {{
$field = $crate::util::ser::MaybeReadable::read(&mut $reader)?;
+ if $field.is_none() {
+ #[cfg(not(debug_assertions))] {
+ // In general, MaybeReadable implementations are required to consume all the bytes
+ // of the object even if they don't understand it, but due to a bug in the
+ // serialization format for `impl_writeable_tlv_based_enum_upgradable` we sometimes
+ // don't know how many bytes that is. In such cases, we'd like to spuriously allow
+ // TLV length mismatches, which we do here by calling `eat_remaining` so that the
+ // `s.bytes_remain()` check in `_decode_tlv_stream_range` doesn't fail.
+ $reader.eat_remaining()?;
+ }
+ }
}};
- ($reader: expr, $field: ident, (option: $trait: ident $(, $read_arg: expr)?)) => {{
+ ($outer_reader: expr, $reader: expr, $field: ident, (option: $trait: ident $(, $read_arg: expr)?)) => {{
$field = Some($trait::read(&mut $reader $(, $read_arg)*)?);
}};
- ($reader: expr, $field: ident, (option, encoding: ($fieldty: ty, $encoding: ident, $encoder:ty))) => {{
- $crate::_decode_tlv!($reader, $field, (option, encoding: ($fieldty, $encoding)));
+ ($outer_reader: expr, $reader: expr, $field: ident, (option, encoding: ($fieldty: ty, $encoding: ident, $encoder:ty))) => {{
+ $crate::_decode_tlv!($outer_reader, $reader, $field, (option, encoding: ($fieldty, $encoding)));
}};
- ($reader: expr, $field: ident, (option, encoding: ($fieldty: ty, $encoding: ident))) => {{
+ ($outer_reader: expr, $reader: expr, $field: ident, (option, encoding: ($fieldty: ty, $encoding: ident))) => {{
$field = {
let field: $encoding<$fieldty> = ser::Readable::read(&mut $reader)?;
Some(field.0)
};
}};
- ($reader: expr, $field: ident, (option, encoding: $fieldty: ty)) => {{
- $crate::_decode_tlv!($reader, $field, option);
+ ($outer_reader: expr, $reader: expr, $field: ident, (option, encoding: $fieldty: ty)) => {{
+ $crate::_decode_tlv!($outer_reader, $reader, $field, option);
}};
}
let mut s = ser::FixedLengthReader::new(&mut stream_ref, length.0);
match typ.0 {
$(_t if $crate::_decode_tlv_stream_match_check!(_t, $type, $fieldty) => {
- $crate::_decode_tlv!(s, $field, $fieldty);
+ $crate::_decode_tlv!($stream, s, $field, $fieldty);
if s.bytes_remain() {
s.eat_remaining()?; // Return ShortRead if there's actually not enough bytes
return Err(DecodeError::InvalidValue);
$($variant_id => {
// Because read_tlv_fields creates a labeled loop, we cannot call it twice
// in the same function body. Instead, we define a closure and call it.
- let f = || {
+ let mut f = || {
$crate::_init_and_read_len_prefixed_tlv_fields!(reader, {
$(($type, $field, $fieldty)),*
});
/// when [`MaybeReadable`] is practical instead of just [`Readable`] as it provides an upgrade path for
/// new variants to be added which are simply ignored by existing clients.
///
+/// Note that only struct and unit variants (not tuple variants) will support downgrading, thus any
+/// new odd variants MUST be non-tuple (i.e. described using `$variant_id` and `$variant_name` not
+/// `$tuple_variant_id` and `$tuple_variant_name`).
+///
/// [`MaybeReadable`]: crate::util::ser::MaybeReadable
/// [`Writeable`]: crate::util::ser::Writeable
/// [`DecodeError::UnknownRequiredFeature`]: crate::ln::msgs::DecodeError::UnknownRequiredFeature
$($variant_id => {
// Because read_tlv_fields creates a labeled loop, we cannot call it twice
// in the same function body. Instead, we define a closure and call it.
- let f = || {
+ let mut f = || {
$crate::_init_and_read_len_prefixed_tlv_fields!(reader, {
$(($type, $field, $fieldty)),*
});
$($($tuple_variant_id => {
Ok(Some($st::$tuple_variant_name(Readable::read(reader)?)))
}),*)*
- _ if id % 2 == 1 => Ok(None),
+ _ if id % 2 == 1 => {
+ // Assume that a $variant_id was written, not a $tuple_variant_id, and read
+ // the length prefix and discard the correct number of bytes.
+ let tlv_len: $crate::util::ser::BigSize = $crate::util::ser::Readable::read(reader)?;
+ let mut rd = $crate::util::ser::FixedLengthReader::new(reader, tlv_len.0);
+ rd.eat_remaining().map_err(|_| $crate::ln::msgs::DecodeError::ShortRead)?;
+ Ok(None)
+ },
_ => Err($crate::ln::msgs::DecodeError::UnknownRequiredFeature),
}
}
#[cfg(test)]
mod tests {
- use crate::io::{self, Cursor};
+ #[allow(unused_imports)]
use crate::prelude::*;
+
+ use crate::io::{self, Cursor};
use crate::ln::msgs::DecodeError;
- use crate::util::ser::{Writeable, HighZeroBytesDroppedBigSize, VecWriter};
+ use crate::util::ser::{MaybeReadable, Readable, Writeable, HighZeroBytesDroppedBigSize, VecWriter};
use bitcoin::hashes::hex::FromHex;
use bitcoin::secp256k1::PublicKey;
} else { panic!(); }
}
+ /// A "V1" enum with only one variant
+ enum InnerEnumV1 {
+ StructVariantA {
+ field: u32,
+ },
+ }
+
+ impl_writeable_tlv_based_enum_upgradable!(InnerEnumV1,
+ (0, StructVariantA) => {
+ (0, field, required),
+ },
+ );
+
+ struct OuterStructOptionalEnumV1 {
+ inner_enum: Option<InnerEnumV1>,
+ other_field: u32,
+ }
+
+ impl_writeable_tlv_based!(OuterStructOptionalEnumV1, {
+ (0, inner_enum, upgradable_option),
+ (2, other_field, required),
+ });
+
+ /// An upgraded version of [`InnerEnumV1`] that added a second variant
+ enum InnerEnumV2 {
+ StructVariantA {
+ field: u32,
+ },
+ StructVariantB {
+ field2: u64,
+ }
+ }
+
+ impl_writeable_tlv_based_enum_upgradable!(InnerEnumV2,
+ (0, StructVariantA) => {
+ (0, field, required),
+ },
+ (1, StructVariantB) => {
+ (0, field2, required),
+ },
+ );
+
+ struct OuterStructOptionalEnumV2 {
+ inner_enum: Option<InnerEnumV2>,
+ other_field: u32,
+ }
+
+ impl_writeable_tlv_based!(OuterStructOptionalEnumV2, {
+ (0, inner_enum, upgradable_option),
+ (2, other_field, required),
+ });
+
+ #[test]
+ fn upgradable_enum_option() {
+ // Test downgrading from `OuterStructOptionalEnumV2` to `OuterStructOptionalEnumV1` and
+ // ensure we still read the `other_field` just fine.
+ let serialized_bytes = OuterStructOptionalEnumV2 {
+ inner_enum: Some(InnerEnumV2::StructVariantB { field2: 64 }),
+ other_field: 0x1bad1dea,
+ }.encode();
+ let mut s = Cursor::new(serialized_bytes);
+
+ let outer_struct: OuterStructOptionalEnumV1 = Readable::read(&mut s).unwrap();
+ assert!(outer_struct.inner_enum.is_none());
+ assert_eq!(outer_struct.other_field, 0x1bad1dea);
+ }
+
+ /// A struct that is read with an [`InnerEnumV1`] but is written with an [`InnerEnumV2`].
+ struct OuterStructRequiredEnum {
+ #[allow(unused)]
+ inner_enum: InnerEnumV1,
+ }
+
+ impl MaybeReadable for OuterStructRequiredEnum {
+ fn read<R: io::Read>(reader: &mut R) -> Result<Option<Self>, DecodeError> {
+ let mut inner_enum = crate::util::ser::UpgradableRequired(None);
+ read_tlv_fields!(reader, {
+ (0, inner_enum, upgradable_required),
+ });
+ Ok(Some(Self {
+ inner_enum: inner_enum.0.unwrap(),
+ }))
+ }
+ }
+
+ impl Writeable for OuterStructRequiredEnum {
+ fn write<W: crate::util::ser::Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
+ write_tlv_fields!(writer, {
+ (0, InnerEnumV2::StructVariantB { field2: 0xdeadbeef }, required),
+ });
+ Ok(())
+ }
+ }
+
+ struct OuterOuterStruct {
+ outer_struct: Option<OuterStructRequiredEnum>,
+ other_field: u32,
+ }
+
+ impl_writeable_tlv_based!(OuterOuterStruct, {
+ (0, outer_struct, upgradable_option),
+ (2, other_field, required),
+ });
+
+
+ #[test]
+ fn upgradable_enum_required() {
+ // Test downgrading from an `OuterOuterStruct` (i.e. test downgrading an
+ // `upgradable_required` `InnerEnumV2` to an `InnerEnumV1`).
+ //
+ // Note that `OuterStructRequiredEnum` has a split write/read implementation that writes an
+ // `InnerEnumV2::StructVariantB` irrespective of the value of `inner_enum`.
+
+ let dummy_inner_enum = InnerEnumV1::StructVariantA { field: 42 };
+ let serialized_bytes = OuterOuterStruct {
+ outer_struct: Some(OuterStructRequiredEnum { inner_enum: dummy_inner_enum }),
+ other_field: 0x1bad1dea,
+ }.encode();
+ let mut s = Cursor::new(serialized_bytes);
+
+ let outer_outer_struct: OuterOuterStruct = Readable::read(&mut s).unwrap();
+ assert!(outer_outer_struct.outer_struct.is_none());
+ assert_eq!(outer_outer_struct.other_field, 0x1bad1dea);
+ }
+
// BOLT TLV test cases
fn tlv_reader_n1(s: &[u8]) -> Result<(Option<HighZeroBytesDroppedBigSize<u64>>, Option<u64>, Option<(PublicKey, u64, u64)>, Option<u16>), DecodeError> {
let mut s = Cursor::new(s);
//! Utilities for strings.
-use alloc::string::String;
use core::fmt;
use crate::io::{self, Read};
use crate::ln::msgs;
use crate::util::ser::{Writeable, Writer, Readable};
+#[allow(unused_imports)]
+use crate::prelude::*;
+
/// Struct to `Display` fields in a safe way using `PrintableString`
#[derive(Clone, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, Default)]
pub struct UntrustedString(pub String);
--- /dev/null
+// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
+// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
+// You may not use this file except in accordance with one or both of these
+// licenses.
+
+//! This module contains an [`OutputSweeper`] utility that keeps track of
+//! [`SpendableOutputDescriptor`]s, i.e., persists them in a given [`KVStore`] and regularly retries
+//! sweeping them.
+
+use crate::chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator};
+use crate::chain::channelmonitor::ANTI_REORG_DELAY;
+use crate::chain::{self, BestBlock, Confirm, Filter, Listen, WatchedOutput};
+use crate::io;
+use crate::ln::msgs::DecodeError;
+use crate::ln::ChannelId;
+use crate::prelude::Vec;
+use crate::sign::{ChangeDestinationSource, OutputSpender, SpendableOutputDescriptor};
+use crate::sync::Mutex;
+use crate::util::logger::Logger;
+use crate::util::persist::{
+ KVStore, OUTPUT_SWEEPER_PERSISTENCE_KEY, OUTPUT_SWEEPER_PERSISTENCE_PRIMARY_NAMESPACE,
+ OUTPUT_SWEEPER_PERSISTENCE_SECONDARY_NAMESPACE,
+};
+use crate::util::ser::{Readable, ReadableArgs, Writeable};
+use crate::{impl_writeable_tlv_based, log_debug, log_error};
+
+use bitcoin::blockdata::block::Header;
+use bitcoin::blockdata::locktime::absolute::LockTime;
+use bitcoin::secp256k1::Secp256k1;
+use bitcoin::{BlockHash, Transaction, Txid};
+
+use core::ops::Deref;
+
+/// The state of a spendable output currently tracked by an [`OutputSweeper`].
+#[derive(Clone, Debug, PartialEq, Eq)]
+pub struct TrackedSpendableOutput {
+	/// The tracked output descriptor.
+	pub descriptor: SpendableOutputDescriptor,
+	/// The channel this output belongs to.
+	///
+	/// Will be `None` if no `channel_id` was given to [`OutputSweeper::track_spendable_outputs`].
+	pub channel_id: Option<ChannelId>,
+	/// The current status of the output spend.
+	pub status: OutputSpendStatus,
+}
+
+impl TrackedSpendableOutput {
+	/// Builds the [`WatchedOutput`] to register with a chain [`Filter`] so spends of this
+	/// output are surfaced to the sweeper.
+	fn to_watched_output(&self, cur_hash: BlockHash) -> WatchedOutput {
+		// Use the chain tip hash from when we first broadcast a spend, if any, otherwise the
+		// current tip handed in by the caller.
+		let block_hash = self.status.first_broadcast_hash().or(Some(cur_hash));
+		match &self.descriptor {
+			SpendableOutputDescriptor::StaticOutput { outpoint, output, channel_keys_id: _ } => {
+				WatchedOutput {
+					block_hash,
+					outpoint: *outpoint,
+					script_pubkey: output.script_pubkey.clone(),
+				}
+			},
+			SpendableOutputDescriptor::DelayedPaymentOutput(output) => WatchedOutput {
+				block_hash,
+				outpoint: output.outpoint,
+				script_pubkey: output.output.script_pubkey.clone(),
+			},
+			SpendableOutputDescriptor::StaticPaymentOutput(output) => WatchedOutput {
+				block_hash,
+				outpoint: output.outpoint,
+				script_pubkey: output.output.script_pubkey.clone(),
+			},
+		}
+	}
+
+	/// Returns whether the output is spent in the given transaction.
+	pub fn is_spent_in(&self, tx: &Transaction) -> bool {
+		let prev_outpoint = match &self.descriptor {
+			SpendableOutputDescriptor::StaticOutput { outpoint, .. } => *outpoint,
+			SpendableOutputDescriptor::DelayedPaymentOutput(output) => output.outpoint,
+			SpendableOutputDescriptor::StaticPaymentOutput(output) => output.outpoint,
+		}
+		.into_bitcoin_outpoint();
+
+		// A transaction spends the output iff one of its inputs references our outpoint.
+		tx.input.iter().any(|input| input.previous_output == prev_outpoint)
+	}
+}
+
+// Persisted TLV format; type numbers are part of the on-disk encoding and must not be
+// reused or reordered.
+impl_writeable_tlv_based!(TrackedSpendableOutput, {
+	(0, descriptor, required),
+	(2, channel_id, option),
+	(4, status, required),
+});
+
+/// The current status of the output spend.
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub enum OutputSpendStatus {
+	/// The output is tracked but an initial spending transaction hasn't been generated and
+	/// broadcasted yet.
+	PendingInitialBroadcast {
+		/// The height at which we will first generate and broadcast a spending transaction.
+		delayed_until_height: Option<u32>,
+	},
+	/// A transaction spending the output has been broadcasted but is pending its first
+	/// confirmation on-chain.
+	PendingFirstConfirmation {
+		/// The hash of the chain tip when we first broadcast a transaction spending this output.
+		first_broadcast_hash: BlockHash,
+		/// The best height when we last broadcast a transaction spending this output.
+		latest_broadcast_height: u32,
+		/// The transaction spending this output we last broadcasted.
+		latest_spending_tx: Transaction,
+	},
+	/// A transaction spending the output has been confirmed on-chain but will be tracked until it
+	/// reaches [`ANTI_REORG_DELAY`] confirmations.
+	PendingThresholdConfirmations {
+		/// The hash of the chain tip when we first broadcast a transaction spending this output.
+		first_broadcast_hash: BlockHash,
+		/// The best height when we last broadcast a transaction spending this output.
+		latest_broadcast_height: u32,
+		/// The transaction spending this output we saw confirmed on-chain.
+		latest_spending_tx: Transaction,
+		/// The height at which the spending transaction was confirmed.
+		confirmation_height: u32,
+		/// The hash of the block in which the spending transaction was confirmed.
+		confirmation_hash: BlockHash,
+	},
+}
+
+impl OutputSpendStatus {
+	/// Records a (re-)broadcast of `latest_spending_tx` at the given chain tip.
+	fn broadcast(&mut self, cur_hash: BlockHash, cur_height: u32, latest_spending_tx: Transaction) {
+		match self {
+			Self::PendingInitialBroadcast { delayed_until_height } => {
+				if let Some(delayed_until_height) = delayed_until_height {
+					debug_assert!(
+						cur_height >= *delayed_until_height,
+						"We should never broadcast before the required height is reached."
+					);
+				}
+				*self = Self::PendingFirstConfirmation {
+					first_broadcast_hash: cur_hash,
+					latest_broadcast_height: cur_height,
+					latest_spending_tx,
+				};
+			},
+			Self::PendingFirstConfirmation { first_broadcast_hash, .. } => {
+				// Keep the original first-broadcast hash; only the height and tx are bumped.
+				*self = Self::PendingFirstConfirmation {
+					first_broadcast_hash: *first_broadcast_hash,
+					latest_broadcast_height: cur_height,
+					latest_spending_tx,
+				};
+			},
+			Self::PendingThresholdConfirmations { .. } => {
+				debug_assert!(false, "We should never rebroadcast confirmed transactions.");
+			},
+		}
+	}
+
+	/// Transitions to [`Self::PendingThresholdConfirmations`] after seeing the spending
+	/// transaction confirm on-chain.
+	fn confirmed(
+		&mut self, confirmation_hash: BlockHash, confirmation_height: u32,
+		latest_spending_tx: Transaction,
+	) {
+		match self {
+			Self::PendingInitialBroadcast { .. } => {
+				// Generally we can't see any of our transactions confirmed if they haven't been
+				// broadcasted yet, so this should never be reachable via `transactions_confirmed`.
+				// TODO(review): the assertion message below has a typo ("This a bug"); left
+				// untouched here as it is a runtime string.
+				debug_assert!(false, "We should never confirm when we haven't broadcasted. This a bug and should never happen, please report.");
+				*self = Self::PendingThresholdConfirmations {
+					first_broadcast_hash: confirmation_hash,
+					latest_broadcast_height: confirmation_height,
+					latest_spending_tx,
+					confirmation_height,
+					confirmation_hash,
+				};
+			},
+			Self::PendingFirstConfirmation {
+				first_broadcast_hash,
+				latest_broadcast_height,
+				..
+			} => {
+				debug_assert!(confirmation_height >= *latest_broadcast_height);
+				*self = Self::PendingThresholdConfirmations {
+					first_broadcast_hash: *first_broadcast_hash,
+					latest_broadcast_height: *latest_broadcast_height,
+					latest_spending_tx,
+					confirmation_height,
+					confirmation_hash,
+				};
+			},
+			Self::PendingThresholdConfirmations {
+				first_broadcast_hash,
+				latest_broadcast_height,
+				..
+			} => {
+				// Already confirmed; update to the latest confirmation we saw (e.g. after a
+				// reorg moved the spend to a different block).
+				*self = Self::PendingThresholdConfirmations {
+					first_broadcast_hash: *first_broadcast_hash,
+					latest_broadcast_height: *latest_broadcast_height,
+					latest_spending_tx,
+					confirmation_height,
+					confirmation_hash,
+				};
+			},
+		}
+	}
+
+	/// Reverts a previously-confirmed spend back to [`Self::PendingFirstConfirmation`], used
+	/// when the confirming block is disconnected.
+	fn unconfirmed(&mut self) {
+		match self {
+			Self::PendingInitialBroadcast { .. } => {
+				debug_assert!(
+					false,
+					"We should only mark a spend as unconfirmed if it used to be confirmed."
+				);
+			},
+			Self::PendingFirstConfirmation { .. } => {
+				debug_assert!(
+					false,
+					"We should only mark a spend as unconfirmed if it used to be confirmed."
+				);
+			},
+			Self::PendingThresholdConfirmations {
+				first_broadcast_hash,
+				latest_broadcast_height,
+				latest_spending_tx,
+				..
+			} => {
+				*self = Self::PendingFirstConfirmation {
+					first_broadcast_hash: *first_broadcast_hash,
+					latest_broadcast_height: *latest_broadcast_height,
+					latest_spending_tx: latest_spending_tx.clone(),
+				};
+			},
+		}
+	}
+
+	/// Whether the initial broadcast is still delayed past `cur_height`.
+	fn is_delayed(&self, cur_height: u32) -> bool {
+		match self {
+			Self::PendingInitialBroadcast { delayed_until_height } => {
+				delayed_until_height.map_or(false, |req_height| cur_height < req_height)
+			},
+			Self::PendingFirstConfirmation { .. } => false,
+			Self::PendingThresholdConfirmations { .. } => false,
+		}
+	}
+
+	/// The chain tip hash at the time of the first broadcast, if we broadcast yet.
+	fn first_broadcast_hash(&self) -> Option<BlockHash> {
+		match self {
+			Self::PendingInitialBroadcast { .. } => None,
+			Self::PendingFirstConfirmation { first_broadcast_hash, .. } => {
+				Some(*first_broadcast_hash)
+			},
+			Self::PendingThresholdConfirmations { first_broadcast_hash, .. } => {
+				Some(*first_broadcast_hash)
+			},
+		}
+	}
+
+	/// The height at which we last broadcast a spending transaction, if any.
+	fn latest_broadcast_height(&self) -> Option<u32> {
+		match self {
+			Self::PendingInitialBroadcast { .. } => None,
+			Self::PendingFirstConfirmation { latest_broadcast_height, .. } => {
+				Some(*latest_broadcast_height)
+			},
+			Self::PendingThresholdConfirmations { latest_broadcast_height, .. } => {
+				Some(*latest_broadcast_height)
+			},
+		}
+	}
+
+	/// The height at which the spend confirmed, if it confirmed.
+	fn confirmation_height(&self) -> Option<u32> {
+		match self {
+			Self::PendingInitialBroadcast { .. } => None,
+			Self::PendingFirstConfirmation { .. } => None,
+			Self::PendingThresholdConfirmations { confirmation_height, .. } => {
+				Some(*confirmation_height)
+			},
+		}
+	}
+
+	/// The hash of the block in which the spend confirmed, if it confirmed.
+	fn confirmation_hash(&self) -> Option<BlockHash> {
+		match self {
+			Self::PendingInitialBroadcast { .. } => None,
+			Self::PendingFirstConfirmation { .. } => None,
+			Self::PendingThresholdConfirmations { confirmation_hash, .. } => {
+				Some(*confirmation_hash)
+			},
+		}
+	}
+
+	/// The most recently broadcast (or confirmed) spending transaction, if any.
+	fn latest_spending_tx(&self) -> Option<&Transaction> {
+		match self {
+			Self::PendingInitialBroadcast { .. } => None,
+			Self::PendingFirstConfirmation { latest_spending_tx, .. } => Some(latest_spending_tx),
+			Self::PendingThresholdConfirmations { latest_spending_tx, .. } => {
+				Some(latest_spending_tx)
+			},
+		}
+	}
+
+	/// Whether the spend has at least one on-chain confirmation.
+	fn is_confirmed(&self) -> bool {
+		match self {
+			Self::PendingInitialBroadcast { .. } => false,
+			Self::PendingFirstConfirmation { .. } => false,
+			Self::PendingThresholdConfirmations { .. } => true,
+		}
+	}
+}
+
+// Persisted TLV encoding; variant and field type numbers are part of the on-disk format and
+// must not be reused or reordered.
+impl_writeable_tlv_based_enum!(OutputSpendStatus,
+	(0, PendingInitialBroadcast) => {
+		(0, delayed_until_height, option),
+	},
+	(2, PendingFirstConfirmation) => {
+		(0, first_broadcast_hash, required),
+		(2, latest_broadcast_height, required),
+		(4, latest_spending_tx, required),
+	},
+	(4, PendingThresholdConfirmations) => {
+		(0, first_broadcast_hash, required),
+		(2, latest_broadcast_height, required),
+		(4, latest_spending_tx, required),
+		(6, confirmation_height, required),
+		(8, confirmation_hash, required),
+	};
+);
+
+/// A utility that keeps track of [`SpendableOutputDescriptor`]s, persists them in a given
+/// [`KVStore`] and regularly retries sweeping them based on a callback given to the constructor
+/// methods.
+///
+/// Users should call [`Self::track_spendable_outputs`] for any [`SpendableOutputDescriptor`]s
+/// received via [`Event::SpendableOutputs`].
+///
+/// This needs to be notified of chain state changes either via its [`Listen`] or [`Confirm`]
+/// implementation and hence has to be connected with the utilized chain data sources.
+///
+/// If chain data is provided via the [`Confirm`] interface or via filtered blocks, users are
+/// required to give their chain data sources (i.e., [`Filter`] implementation) to the respective
+/// constructor.
+///
+/// [`Event::SpendableOutputs`]: crate::events::Event::SpendableOutputs
+pub struct OutputSweeper<B: Deref, D: Deref, E: Deref, F: Deref, K: Deref, L: Deref, O: Deref>
+where
+	B::Target: BroadcasterInterface,
+	D::Target: ChangeDestinationSource,
+	E::Target: FeeEstimator,
+	F::Target: Filter + Sync + Send,
+	K::Target: KVStore,
+	L::Target: Logger,
+	O::Target: OutputSpender,
+{
+	// Tracked outputs and the current best block, guarded by a single mutex.
+	sweeper_state: Mutex<SweeperState>,
+	broadcaster: B,
+	fee_estimator: E,
+	chain_data_source: Option<F>,
+	output_spender: O,
+	change_destination_source: D,
+	kv_store: K,
+	logger: L,
+}
+
+impl<B: Deref, D: Deref, E: Deref, F: Deref, K: Deref, L: Deref, O: Deref>
+	OutputSweeper<B, D, E, F, K, L, O>
+where
+	B::Target: BroadcasterInterface,
+	D::Target: ChangeDestinationSource,
+	E::Target: FeeEstimator,
+	F::Target: Filter + Sync + Send,
+	K::Target: KVStore,
+	L::Target: Logger,
+	O::Target: OutputSpender,
+{
+	/// Constructs a new [`OutputSweeper`].
+	///
+	/// If chain data is provided via the [`Confirm`] interface or via filtered blocks, users also
+	/// need to register their [`Filter`] implementation via the given `chain_data_source`.
+	pub fn new(
+		best_block: BestBlock, broadcaster: B, fee_estimator: E, chain_data_source: Option<F>,
+		output_spender: O, change_destination_source: D, kv_store: K, logger: L,
+	) -> Self {
+		let outputs = Vec::new();
+		let sweeper_state = Mutex::new(SweeperState { outputs, best_block });
+		Self {
+			sweeper_state,
+			broadcaster,
+			fee_estimator,
+			chain_data_source,
+			output_spender,
+			change_destination_source,
+			kv_store,
+			logger,
+		}
+	}
+
+	/// Tells the sweeper to track the given outputs descriptors.
+	///
+	/// Usually, this should be called based on the values emitted by the
+	/// [`Event::SpendableOutputs`].
+	///
+	/// The given `exclude_static_outputs` flag controls whether the sweeper will filter out
+	/// [`SpendableOutputDescriptor::StaticOutput`]s, which may be handled directly by the on-chain
+	/// wallet implementation.
+	///
+	/// If `delay_until_height` is set, we will delay the spending until the respective block
+	/// height is reached. This can be used to batch spends, e.g., to reduce on-chain fees.
+	///
+	/// Returns `Err` on persistence failure, in which case the call may be safely retried.
+	///
+	/// [`Event::SpendableOutputs`]: crate::events::Event::SpendableOutputs
+	pub fn track_spendable_outputs(
+		&self, output_descriptors: Vec<SpendableOutputDescriptor>, channel_id: Option<ChannelId>,
+		exclude_static_outputs: bool, delay_until_height: Option<u32>,
+	) -> Result<(), ()> {
+		let mut relevant_descriptors = output_descriptors
+			.into_iter()
+			.filter(|desc| {
+				!(exclude_static_outputs
+					&& matches!(desc, SpendableOutputDescriptor::StaticOutput { .. }))
+			})
+			.peekable();
+
+		// Avoid taking the lock and persisting if nothing survived the filter.
+		if relevant_descriptors.peek().is_none() {
+			return Ok(());
+		}
+
+		let spending_tx_opt;
+		{
+			let mut state_lock = self.sweeper_state.lock().unwrap();
+			for descriptor in relevant_descriptors {
+				let output_info = TrackedSpendableOutput {
+					descriptor,
+					channel_id,
+					status: OutputSpendStatus::PendingInitialBroadcast {
+						delayed_until_height: delay_until_height,
+					},
+				};
+
+				// Skip descriptors we already track (dedup by descriptor equality).
+				// NOTE(review): `.find(...).is_some()` could be `.any(...)`
+				// (clippy::search_is_some); left unchanged in this comment-only pass.
+				if state_lock
+					.outputs
+					.iter()
+					.find(|o| o.descriptor == output_info.descriptor)
+					.is_some()
+				{
+					continue;
+				}
+
+				state_lock.outputs.push(output_info);
+			}
+			spending_tx_opt = self.regenerate_spend_if_necessary(&mut *state_lock);
+			self.persist_state(&*state_lock).map_err(|e| {
+				log_error!(self.logger, "Error persisting OutputSweeper: {:?}", e);
+			})?;
+		}
+
+		// Broadcast outside the lock.
+		if let Some(spending_tx) = spending_tx_opt {
+			self.broadcaster.broadcast_transactions(&[&spending_tx]);
+		}
+
+		Ok(())
+	}
+
+	/// Returns a list of the currently tracked spendable outputs.
+	pub fn tracked_spendable_outputs(&self) -> Vec<TrackedSpendableOutput> {
+		self.sweeper_state.lock().unwrap().outputs.clone()
+	}
+
+	/// Gets the latest best block which was connected either via the [`Listen`] or
+	/// [`Confirm`] interfaces.
+	pub fn current_best_block(&self) -> BestBlock {
+		self.sweeper_state.lock().unwrap().best_block
+	}
+
+	/// Generates a fresh spending transaction for any outputs that are neither confirmed,
+	/// delayed, nor already broadcast at the current height, updating their status in place.
+	/// Returns the transaction to broadcast, if one was generated.
+	fn regenerate_spend_if_necessary(
+		&self, sweeper_state: &mut SweeperState,
+	) -> Option<Transaction> {
+		let cur_height = sweeper_state.best_block.height;
+		let cur_hash = sweeper_state.best_block.block_hash;
+		let filter_fn = |o: &TrackedSpendableOutput| {
+			if o.status.is_confirmed() {
+				// Don't rebroadcast confirmed txs.
+				return false;
+			}
+
+			if o.status.is_delayed(cur_height) {
+				// Don't generate and broadcast if still delayed
+				return false;
+			}
+
+			if o.status.latest_broadcast_height() >= Some(cur_height) {
+				// Only broadcast once per block height.
+				return false;
+			}
+
+			true
+		};
+
+		let respend_descriptors: Vec<&SpendableOutputDescriptor> =
+			sweeper_state.outputs.iter().filter(|o| filter_fn(*o)).map(|o| &o.descriptor).collect();
+
+		if respend_descriptors.is_empty() {
+			// Nothing to do.
+			return None;
+		}
+
+		let spending_tx = match self.spend_outputs(&*sweeper_state, respend_descriptors) {
+			Ok(spending_tx) => {
+				log_debug!(
+					self.logger,
+					"Generating and broadcasting sweeping transaction {}",
+					spending_tx.txid()
+				);
+				spending_tx
+			},
+			Err(e) => {
+				log_error!(self.logger, "Error spending outputs: {:?}", e);
+				return None;
+			},
+		};
+
+		// As we didn't modify the state so far, the same filter_fn yields the same elements as
+		// above.
+		let respend_outputs = sweeper_state.outputs.iter_mut().filter(|o| filter_fn(&**o));
+		for output_info in respend_outputs {
+			if let Some(filter) = self.chain_data_source.as_ref() {
+				let watched_output = output_info.to_watched_output(cur_hash);
+				filter.register_output(watched_output);
+			}
+
+			output_info.status.broadcast(cur_hash, cur_height, spending_tx.clone());
+		}
+
+		Some(spending_tx)
+	}
+
+	/// Drops any output whose spend reached [`ANTI_REORG_DELAY`] confirmations.
+	fn prune_confirmed_outputs(&self, sweeper_state: &mut SweeperState) {
+		let cur_height = sweeper_state.best_block.height;
+
+		// Prune all outputs that have sufficient depth by now.
+		sweeper_state.outputs.retain(|o| {
+			if let Some(confirmation_height) = o.status.confirmation_height() {
+				if cur_height >= confirmation_height + ANTI_REORG_DELAY - 1 {
+					log_debug!(self.logger,
+						"Pruning swept output as sufficiently confirmed via spend in transaction {:?}. Pruned descriptor: {:?}",
+						o.status.latest_spending_tx().map(|t| t.txid()), o.descriptor
+					);
+					return false;
+				}
+			}
+			true
+		});
+	}
+
+	/// Writes the current sweeper state to the [`KVStore`], logging on failure.
+	fn persist_state(&self, sweeper_state: &SweeperState) -> Result<(), io::Error> {
+		self.kv_store
+			.write(
+				OUTPUT_SWEEPER_PERSISTENCE_PRIMARY_NAMESPACE,
+				OUTPUT_SWEEPER_PERSISTENCE_SECONDARY_NAMESPACE,
+				OUTPUT_SWEEPER_PERSISTENCE_KEY,
+				&sweeper_state.encode(),
+			)
+			.map_err(|e| {
+				log_error!(
+					self.logger,
+					"Write for key {}/{}/{} failed due to: {}",
+					OUTPUT_SWEEPER_PERSISTENCE_PRIMARY_NAMESPACE,
+					OUTPUT_SWEEPER_PERSISTENCE_SECONDARY_NAMESPACE,
+					OUTPUT_SWEEPER_PERSISTENCE_KEY,
+					e
+				);
+				e
+			})
+	}
+
+	/// Builds a transaction sweeping `descriptors` to a fresh change address, locktimed to the
+	/// current height (to discourage fee sniping).
+	fn spend_outputs(
+		&self, sweeper_state: &SweeperState, descriptors: Vec<&SpendableOutputDescriptor>,
+	) -> Result<Transaction, ()> {
+		let tx_feerate =
+			self.fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::OutputSpendingFee);
+		let change_destination_script =
+			self.change_destination_source.get_change_destination_script()?;
+		let cur_height = sweeper_state.best_block.height;
+		// `from_height` only fails for out-of-range heights; fall back to no locktime then.
+		let locktime = Some(LockTime::from_height(cur_height).unwrap_or(LockTime::ZERO));
+		self.output_spender.spend_spendable_outputs(
+			&descriptors,
+			Vec::new(),
+			change_destination_script,
+			tx_feerate,
+			locktime,
+			&Secp256k1::new(),
+		)
+	}
+
+	/// Marks any tracked output spent by a transaction in `txdata` as confirmed.
+	fn transactions_confirmed_internal(
+		&self, sweeper_state: &mut SweeperState, header: &Header,
+		txdata: &chain::transaction::TransactionData, height: u32,
+	) {
+		let confirmation_hash = header.block_hash();
+		for (_, tx) in txdata {
+			for output_info in sweeper_state.outputs.iter_mut() {
+				if output_info.is_spent_in(*tx) {
+					output_info.status.confirmed(confirmation_hash, height, (*tx).clone())
+				}
+			}
+		}
+	}
+
+	/// Advances the best block, prunes fully-confirmed outputs, and regenerates a spend if
+	/// needed. Returns the transaction the caller should broadcast, if any.
+	fn best_block_updated_internal(
+		&self, sweeper_state: &mut SweeperState, header: &Header, height: u32,
+	) -> Option<Transaction> {
+		sweeper_state.best_block = BestBlock::new(header.block_hash(), height);
+		self.prune_confirmed_outputs(sweeper_state);
+		let spending_tx_opt = self.regenerate_spend_if_necessary(sweeper_state);
+		spending_tx_opt
+	}
+}
+
+impl<B: Deref, D: Deref, E: Deref, F: Deref, K: Deref, L: Deref, O: Deref> Listen
+	for OutputSweeper<B, D, E, F, K, L, O>
+where
+	B::Target: BroadcasterInterface,
+	D::Target: ChangeDestinationSource,
+	E::Target: FeeEstimator,
+	F::Target: Filter + Sync + Send,
+	K::Target: KVStore,
+	L::Target: Logger,
+	O::Target: OutputSpender,
+{
+	fn filtered_block_connected(
+		&self, header: &Header, txdata: &chain::transaction::TransactionData, height: u32,
+	) {
+		let mut spending_tx_opt;
+		{
+			let mut state_lock = self.sweeper_state.lock().unwrap();
+			// `Listen` requires in-order block connection; enforce it loudly.
+			assert_eq!(state_lock.best_block.block_hash, header.prev_blockhash,
+				"Blocks must be connected in chain-order - the connected header must build on the last connected header");
+			assert_eq!(state_lock.best_block.height, height - 1,
+				"Blocks must be connected in chain-order - the connected block height must be one greater than the previous height");
+
+			self.transactions_confirmed_internal(&mut *state_lock, header, txdata, height);
+			spending_tx_opt = self.best_block_updated_internal(&mut *state_lock, header, height);
+
+			self.persist_state(&*state_lock).unwrap_or_else(|e| {
+				log_error!(self.logger, "Error persisting OutputSweeper: {:?}", e);
+				// Skip broadcasting if the persist failed.
+				spending_tx_opt = None;
+			});
+		}
+
+		// Broadcast outside the lock.
+		if let Some(spending_tx) = spending_tx_opt {
+			self.broadcaster.broadcast_transactions(&[&spending_tx]);
+		}
+	}
+
+	fn block_disconnected(&self, header: &Header, height: u32) {
+		let mut state_lock = self.sweeper_state.lock().unwrap();
+
+		let new_height = height - 1;
+		let block_hash = header.block_hash();
+
+		assert_eq!(state_lock.best_block.block_hash, block_hash,
+			"Blocks must be disconnected in chain-order - the disconnected header must be the last connected header");
+		assert_eq!(state_lock.best_block.height, height,
+			"Blocks must be disconnected in chain-order - the disconnected block must have the correct height");
+		state_lock.best_block = BestBlock::new(header.prev_blockhash, new_height);
+
+		// Any spend that confirmed in the disconnected block is no longer confirmed.
+		for output_info in state_lock.outputs.iter_mut() {
+			if output_info.status.confirmation_hash() == Some(block_hash) {
+				debug_assert_eq!(output_info.status.confirmation_height(), Some(height));
+				output_info.status.unconfirmed();
+			}
+		}
+
+		self.persist_state(&*state_lock).unwrap_or_else(|e| {
+			log_error!(self.logger, "Error persisting OutputSweeper: {:?}", e);
+		});
+	}
+}
+
+impl<B: Deref, D: Deref, E: Deref, F: Deref, K: Deref, L: Deref, O: Deref> Confirm
+	for OutputSweeper<B, D, E, F, K, L, O>
+where
+	B::Target: BroadcasterInterface,
+	D::Target: ChangeDestinationSource,
+	E::Target: FeeEstimator,
+	F::Target: Filter + Sync + Send,
+	K::Target: KVStore,
+	L::Target: Logger,
+	O::Target: OutputSpender,
+{
+	fn transactions_confirmed(
+		&self, header: &Header, txdata: &chain::transaction::TransactionData, height: u32,
+	) {
+		let mut state_lock = self.sweeper_state.lock().unwrap();
+		self.transactions_confirmed_internal(&mut *state_lock, header, txdata, height);
+		self.persist_state(&*state_lock).unwrap_or_else(|e| {
+			log_error!(self.logger, "Error persisting OutputSweeper: {:?}", e);
+		});
+	}
+
+	fn transaction_unconfirmed(&self, txid: &Txid) {
+		let mut state_lock = self.sweeper_state.lock().unwrap();
+
+		// Get what height was unconfirmed.
+		let unconf_height = state_lock
+			.outputs
+			.iter()
+			.find(|o| o.status.latest_spending_tx().map(|tx| tx.txid()) == Some(*txid))
+			.and_then(|o| o.status.confirmation_height());
+
+		if let Some(unconf_height) = unconf_height {
+			// Unconfirm all >= this height.
+			state_lock
+				.outputs
+				.iter_mut()
+				.filter(|o| o.status.confirmation_height() >= Some(unconf_height))
+				.for_each(|o| o.status.unconfirmed());
+
+			self.persist_state(&*state_lock).unwrap_or_else(|e| {
+				log_error!(self.logger, "Error persisting OutputSweeper: {:?}", e);
+			});
+		}
+	}
+
+	fn best_block_updated(&self, header: &Header, height: u32) {
+		let mut spending_tx_opt;
+		{
+			let mut state_lock = self.sweeper_state.lock().unwrap();
+			spending_tx_opt = self.best_block_updated_internal(&mut *state_lock, header, height);
+			self.persist_state(&*state_lock).unwrap_or_else(|e| {
+				log_error!(self.logger, "Error persisting OutputSweeper: {:?}", e);
+				// Skip broadcasting if the persist failed.
+				spending_tx_opt = None;
+			});
+		}
+
+		// Broadcast outside the lock.
+		if let Some(spending_tx) = spending_tx_opt {
+			self.broadcaster.broadcast_transactions(&[&spending_tx]);
+		}
+	}
+
+	fn get_relevant_txids(&self) -> Vec<(Txid, u32, Option<BlockHash>)> {
+		// Only confirmed-but-not-yet-pruned spends are relevant for reorg monitoring.
+		let state_lock = self.sweeper_state.lock().unwrap();
+		state_lock
+			.outputs
+			.iter()
+			.filter_map(|o| match o.status {
+				OutputSpendStatus::PendingThresholdConfirmations {
+					ref latest_spending_tx,
+					confirmation_height,
+					confirmation_hash,
+					..
+				} => Some((latest_spending_tx.txid(), confirmation_height, Some(confirmation_hash))),
+				_ => None,
+			})
+			.collect::<Vec<_>>()
+	}
+}
+
+// The persisted, mutex-guarded state of an `OutputSweeper`.
+#[derive(Debug, Clone)]
+struct SweeperState {
+	// All outputs currently being tracked towards a sweep.
+	outputs: Vec<TrackedSpendableOutput>,
+	// The best block last seen via `Listen`/`Confirm`.
+	best_block: BestBlock,
+}
+
+impl_writeable_tlv_based!(SweeperState, {
+	(0, outputs, required_vec),
+	(2, best_block, required),
+});
+
+/// An `enum` signalling to the [`OutputSweeper`] that it should delay spending an output until a
+/// future block height is reached.
+#[derive(Debug, Clone)]
+pub enum SpendingDelay {
+	/// A relative delay indicating we shouldn't spend the output before `cur_height + num_blocks`
+	/// is reached.
+	Relative {
+		/// The number of blocks until we'll generate and broadcast the spending transaction.
+		num_blocks: u32,
+	},
+	/// An absolute delay indicating we shouldn't spend the output before `height` is reached.
+	Absolute {
+		/// The height at which we'll generate and broadcast the spending transaction.
+		height: u32,
+	},
+}
+
+impl<B: Deref, D: Deref, E: Deref, F: Deref, K: Deref, L: Deref, O: Deref>
+	ReadableArgs<(B, E, Option<F>, O, D, K, L)> for OutputSweeper<B, D, E, F, K, L, O>
+where
+	B::Target: BroadcasterInterface,
+	D::Target: ChangeDestinationSource,
+	E::Target: FeeEstimator,
+	F::Target: Filter + Sync + Send,
+	K::Target: KVStore,
+	L::Target: Logger,
+	O::Target: OutputSpender,
+{
+	#[inline]
+	fn read<R: io::Read>(
+		reader: &mut R, args: (B, E, Option<F>, O, D, K, L),
+	) -> Result<Self, DecodeError> {
+		let (
+			broadcaster,
+			fee_estimator,
+			chain_data_source,
+			output_spender,
+			change_destination_source,
+			kv_store,
+			logger,
+		) = args;
+		let state = SweeperState::read(reader)?;
+		let best_block = state.best_block;
+
+		// Re-register all tracked outputs with the chain filter so spends are detected again
+		// after a restart.
+		if let Some(filter) = chain_data_source.as_ref() {
+			for output_info in &state.outputs {
+				let watched_output = output_info.to_watched_output(best_block.block_hash);
+				filter.register_output(watched_output);
+			}
+		}
+
+		let sweeper_state = Mutex::new(state);
+		Ok(Self {
+			sweeper_state,
+			broadcaster,
+			fee_estimator,
+			chain_data_source,
+			output_spender,
+			change_destination_source,
+			kv_store,
+			logger,
+		})
+	}
+}
+
+// Variant of the above that also yields the persisted `BestBlock`, e.g. so callers can sync
+// the sweeper to the chain tip before use.
+// NOTE(review): body duplicates the `ReadableArgs` impl above; could share a helper.
+impl<B: Deref, D: Deref, E: Deref, F: Deref, K: Deref, L: Deref, O: Deref>
+	ReadableArgs<(B, E, Option<F>, O, D, K, L)> for (BestBlock, OutputSweeper<B, D, E, F, K, L, O>)
+where
+	B::Target: BroadcasterInterface,
+	D::Target: ChangeDestinationSource,
+	E::Target: FeeEstimator,
+	F::Target: Filter + Sync + Send,
+	K::Target: KVStore,
+	L::Target: Logger,
+	O::Target: OutputSpender,
+{
+	#[inline]
+	fn read<R: io::Read>(
+		reader: &mut R, args: (B, E, Option<F>, O, D, K, L),
+	) -> Result<Self, DecodeError> {
+		let (
+			broadcaster,
+			fee_estimator,
+			chain_data_source,
+			output_spender,
+			change_destination_source,
+			kv_store,
+			logger,
+		) = args;
+		let state = SweeperState::read(reader)?;
+		let best_block = state.best_block;
+
+		// Re-register all tracked outputs with the chain filter so spends are detected again
+		// after a restart.
+		if let Some(filter) = chain_data_source.as_ref() {
+			for output_info in &state.outputs {
+				let watched_output = output_info.to_watched_output(best_block.block_hash);
+				filter.register_output(watched_output);
+			}
+		}
+
+		let sweeper_state = Mutex::new(state);
+		Ok((
+			best_block,
+			OutputSweeper {
+				sweeper_state,
+				broadcaster,
+				fee_estimator,
+				chain_data_source,
+				output_spender,
+				change_destination_source,
+				kv_store,
+				logger,
+			},
+		))
+	}
+}
use crate::sign::{InMemorySigner, ChannelSigner};
use crate::sign::ecdsa::{EcdsaChannelSigner, WriteableEcdsaChannelSigner};
+#[allow(unused_imports)]
use crate::prelude::*;
+
use core::cmp;
use crate::sync::{Mutex, Arc};
#[cfg(test)] use crate::sync::MutexGuard;
/// When `true`, methods are forwarded to the underlying signer as normal. When `false`, some
/// methods will return `Err` indicating that the signer is unavailable. Intended to be used for
/// testing asynchronous signing.
- #[cfg(test)]
 	pub fn set_available(&self, available: bool) {
+		// Now exposed unconditionally (the `#[cfg(test)]` gate is removed by this patch) so
+		// availability can be driven from outside unit tests as well.
 		*self.available.lock().unwrap() = available;
 	}
}
 	fn sign_justice_revoked_output(&self, justice_tx: &Transaction, input: usize, amount: u64, per_commitment_key: &SecretKey, secp_ctx: &Secp256k1<secp256k1::All>) -> Result<Signature, ()> {
+		// Simulate an unavailable async signer: fail before delegating to the inner signer.
+		if !*self.available.lock().unwrap() {
+			return Err(());
+		}
 		Ok(EcdsaChannelSigner::sign_justice_revoked_output(&self.inner, justice_tx, input, amount, per_commitment_key, secp_ctx).unwrap())
 	}
 	fn sign_justice_revoked_htlc(&self, justice_tx: &Transaction, input: usize, amount: u64, per_commitment_key: &SecretKey, htlc: &HTLCOutputInCommitment, secp_ctx: &Secp256k1<secp256k1::All>) -> Result<Signature, ()> {
+		// Simulate an unavailable async signer: fail before delegating to the inner signer.
+		if !*self.available.lock().unwrap() {
+			return Err(());
+		}
 		Ok(EcdsaChannelSigner::sign_justice_revoked_htlc(&self.inner, justice_tx, input, amount, per_commitment_key, htlc, secp_ctx).unwrap())
 	}
&self, htlc_tx: &Transaction, input: usize, htlc_descriptor: &HTLCDescriptor,
secp_ctx: &Secp256k1<secp256k1::All>
) -> Result<Signature, ()> {
+ if !*self.available.lock().unwrap() {
+ return Err(());
+ }
let state = self.state.lock().unwrap();
if state.last_holder_revoked_commitment - 1 != htlc_descriptor.per_commitment_number &&
state.last_holder_revoked_commitment - 2 != htlc_descriptor.per_commitment_number
}
 	fn sign_counterparty_htlc_transaction(&self, htlc_tx: &Transaction, input: usize, amount: u64, per_commitment_point: &PublicKey, htlc: &HTLCOutputInCommitment, secp_ctx: &Secp256k1<secp256k1::All>) -> Result<Signature, ()> {
+		// Simulate an unavailable async signer: fail before delegating to the inner signer.
+		if !*self.available.lock().unwrap() {
+			return Err(());
+		}
 		Ok(EcdsaChannelSigner::sign_counterparty_htlc_transaction(&self.inner, htlc_tx, input, amount, per_commitment_point, htlc, secp_ctx).unwrap())
 	}
// As long as our minimum dust limit is enforced and is greater than our anchor output
// value, an anchor output can only have an index within [0, 1].
assert!(anchor_tx.input[input].previous_output.vout == 0 || anchor_tx.input[input].previous_output.vout == 1);
+ if !*self.available.lock().unwrap() {
+ return Err(());
+ }
EcdsaChannelSigner::sign_holder_anchor_input(&self.inner, anchor_tx, input, secp_ctx)
}
use crate::chain::WatchedOutput;
use crate::chain::chaininterface;
use crate::chain::chaininterface::ConfirmationTarget;
+#[cfg(test)]
use crate::chain::chaininterface::FEERATE_FLOOR_SATS_PER_KW;
use crate::chain::chainmonitor;
use crate::chain::chainmonitor::{MonitorUpdateId, UpdateOrigin};
use crate::events::bump_transaction::{WalletSource, Utxo};
use crate::ln::ChannelId;
use crate::ln::channelmanager::{ChannelDetails, self};
+#[cfg(test)]
use crate::ln::chan_utils::CommitmentTransaction;
use crate::ln::features::{ChannelFeatures, InitFeatures, NodeFeatures};
use crate::ln::{msgs, wire};
use crate::ln::script::ShutdownScript;
use crate::offers::invoice::{BlindedPayInfo, UnsignedBolt12Invoice};
use crate::offers::invoice_request::UnsignedInvoiceRequest;
-use crate::onion_message::messenger::{Destination, MessageRouter, OnionMessagePath};
+use crate::onion_message::messenger::{DefaultMessageRouter, Destination, MessageRouter, OnionMessagePath};
use crate::routing::gossip::{EffectiveCapacity, NetworkGraph, NodeId, RoutingFees};
use crate::routing::utxo::{UtxoLookup, UtxoLookupError, UtxoResult};
-use crate::routing::router::{find_route, InFlightHtlcs, Path, Route, RouteParameters, RouteHintHop, Router, ScorerAccountingForInFlightHtlcs};
+use crate::routing::router::{DefaultRouter, InFlightHtlcs, Path, Route, RouteParameters, RouteHintHop, Router, ScorerAccountingForInFlightHtlcs};
use crate::routing::scoring::{ChannelUsage, ScoreUpdate, ScoreLookUp};
use crate::sync::RwLock;
use crate::util::config::UserConfig;
use bitcoin::secp256k1::ecdsa::{RecoverableSignature, Signature};
use bitcoin::secp256k1::schnorr;
-#[cfg(any(test, feature = "_test_utils"))]
-use regex;
-
use crate::io;
use crate::prelude::*;
use core::cell::RefCell;
use core::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
use core::mem;
use bitcoin::bech32::u5;
-use crate::sign::{InMemorySigner, Recipient, EntropySource, NodeSigner, SignerProvider};
+use crate::sign::{InMemorySigner, RandomBytes, Recipient, EntropySource, NodeSigner, SignerProvider};
#[cfg(feature = "std")]
use std::time::{SystemTime, UNIX_EPOCH};
}
pub struct TestRouter<'a> {
+ pub router: DefaultRouter<
+ Arc<NetworkGraph<&'a TestLogger>>,
+ &'a TestLogger,
+ Arc<RandomBytes>,
+ &'a RwLock<TestScorer>,
+ (),
+ TestScorer,
+ >,
+	// (The `DefaultRouter`'s entropy source is constructed internally in `new`.)
pub network_graph: Arc<NetworkGraph<&'a TestLogger>>,
pub next_routes: Mutex<VecDeque<(RouteParameters, Result<Route, LightningError>)>>,
pub scorer: &'a RwLock<TestScorer>,
}
impl<'a> TestRouter<'a> {
- pub fn new(network_graph: Arc<NetworkGraph<&'a TestLogger>>, scorer: &'a RwLock<TestScorer>) -> Self {
- Self { network_graph, next_routes: Mutex::new(VecDeque::new()), scorer }
+ pub fn new(
+ network_graph: Arc<NetworkGraph<&'a TestLogger>>, logger: &'a TestLogger,
+ scorer: &'a RwLock<TestScorer>,
+ ) -> Self {
+ let entropy_source = Arc::new(RandomBytes::new([42; 32]));
+ Self {
+ router: DefaultRouter::new(network_graph.clone(), logger, entropy_source, scorer, ()),
+ network_graph,
+ next_routes: Mutex::new(VecDeque::new()),
+ scorer,
+ }
}
pub fn expect_find_route(&self, query: RouteParameters, result: Result<Route, LightningError>) {
&self, payer: &PublicKey, params: &RouteParameters, first_hops: Option<&[&ChannelDetails]>,
inflight_htlcs: InFlightHtlcs
) -> Result<Route, msgs::LightningError> {
- if let Some((find_route_query, find_route_res)) = self.next_routes.lock().unwrap().pop_front() {
+ let route_res;
+ let next_route_opt = self.next_routes.lock().unwrap().pop_front();
+ if let Some((find_route_query, find_route_res)) = next_route_opt {
assert_eq!(find_route_query, *params);
if let Ok(ref route) = find_route_res {
assert_eq!(route.route_params, Some(find_route_query));
details: first_hops[idx],
payer_node_id: &node_id,
});
- scorer.channel_penalty_msat(&candidate, usage, &());
+ scorer.channel_penalty_msat(&candidate, usage, &Default::default());
continue;
}
}
info: directed,
short_channel_id: hop.short_channel_id,
});
- scorer.channel_penalty_msat(&candidate, usage, &());
+ scorer.channel_penalty_msat(&candidate, usage, &Default::default());
} else {
let target_node_id = NodeId::from_pubkey(&hop.pubkey);
let route_hint = RouteHintHop {
hint: &route_hint,
target_node_id: &target_node_id,
});
- scorer.channel_penalty_msat(&candidate, usage, &());
+ scorer.channel_penalty_msat(&candidate, usage, &Default::default());
}
prev_hop_node = &hop.pubkey;
}
}
}
- return find_route_res;
+ route_res = find_route_res;
+ } else {
+ route_res = self.router.find_route(payer, params, first_hops, inflight_htlcs);
+ };
+
+ if let Ok(route) = &route_res {
+ // Previously, `Route`s failed to round-trip through serialization due to a write/read
+ // mismatch. Thus, here we test all test-generated routes round-trip:
+ let ser = route.encode();
+ assert_eq!(Route::read(&mut &ser[..]).unwrap(), *route);
}
- let logger = TestLogger::new();
- find_route(
- payer, params, &self.network_graph, first_hops, &logger,
- &ScorerAccountingForInFlightHtlcs::new(self.scorer.read().unwrap(), &inflight_htlcs), &Default::default(),
- &[42; 32]
- )
+ route_res
}
fn create_blinded_payment_paths<
- ES: EntropySource + ?Sized, T: secp256k1::Signing + secp256k1::Verification
+ T: secp256k1::Signing + secp256k1::Verification
>(
- &self, _recipient: PublicKey, _first_hops: Vec<ChannelDetails>, _tlvs: ReceiveTlvs,
- _amount_msats: u64, _entropy_source: &ES, _secp_ctx: &Secp256k1<T>
+ &self, recipient: PublicKey, first_hops: Vec<ChannelDetails>, tlvs: ReceiveTlvs,
+ amount_msats: u64, secp_ctx: &Secp256k1<T>,
) -> Result<Vec<(BlindedPayInfo, BlindedPath)>, ()> {
- unreachable!()
+ self.router.create_blinded_payment_paths(
+ recipient, first_hops, tlvs, amount_msats, secp_ctx
+ )
}
}
impl<'a> MessageRouter for TestRouter<'a> {
fn find_path(
- &self, _sender: PublicKey, _peers: Vec<PublicKey>, _destination: Destination
+ &self, sender: PublicKey, peers: Vec<PublicKey>, destination: Destination
) -> Result<OnionMessagePath, ()> {
- unreachable!()
+ self.router.find_path(sender, peers, destination)
}
fn create_blinded_paths<
- ES: EntropySource + ?Sized, T: secp256k1::Signing + secp256k1::Verification
+ T: secp256k1::Signing + secp256k1::Verification
>(
- &self, _recipient: PublicKey, _peers: Vec<PublicKey>, _entropy_source: &ES,
- _secp_ctx: &Secp256k1<T>
+ &self, recipient: PublicKey, peers: Vec<PublicKey>, secp_ctx: &Secp256k1<T>,
) -> Result<Vec<BlindedPath>, ()> {
- unreachable!()
+ self.router.create_blinded_paths(recipient, peers, secp_ctx)
}
}
}
}
+pub struct TestMessageRouter<'a> {
+ inner: DefaultMessageRouter<Arc<NetworkGraph<&'a TestLogger>>, &'a TestLogger, &'a TestKeysInterface>,
+}
+
+impl<'a> TestMessageRouter<'a> {
+ pub fn new(network_graph: Arc<NetworkGraph<&'a TestLogger>>, entropy_source: &'a TestKeysInterface) -> Self {
+ Self { inner: DefaultMessageRouter::new(network_graph, entropy_source) }
+ }
+}
+
+impl<'a> MessageRouter for TestMessageRouter<'a> {
+ fn find_path(
+ &self, sender: PublicKey, peers: Vec<PublicKey>, destination: Destination
+ ) -> Result<OnionMessagePath, ()> {
+ self.inner.find_path(sender, peers, destination)
+ }
+
+ fn create_blinded_paths<T: secp256k1::Signing + secp256k1::Verification>(
+ &self, recipient: PublicKey, peers: Vec<PublicKey>, secp_ctx: &Secp256k1<T>,
+ ) -> Result<Vec<BlindedPath>, ()> {
+ self.inner.create_blinded_paths(recipient, peers, secp_ctx)
+ }
+}
+
pub struct OnlyReadsKeysInterface {}
impl EntropySource for OnlyReadsKeysInterface {
pub fn new(chain_source: Option<&'a TestChainSource>, broadcaster: &'a dyn chaininterface::BroadcasterInterface, logger: &'a TestLogger, fee_estimator: &'a TestFeeEstimator, persister: &'a dyn chainmonitor::Persist<TestChannelSigner>, keys_manager: &'a TestKeysInterface) -> Self {
Self {
added_monitors: Mutex::new(Vec::new()),
- monitor_updates: Mutex::new(HashMap::new()),
- latest_monitor_update_id: Mutex::new(HashMap::new()),
+ monitor_updates: Mutex::new(new_hash_map()),
+ latest_monitor_update_id: Mutex::new(new_hash_map()),
chain_monitor: chainmonitor::ChainMonitor::new(chain_source, broadcaster, logger, fee_estimator, persister),
keys_manager,
expect_channel_force_closed: Mutex::new(None),
let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<TestChannelSigner>)>::read(
&mut io::Cursor::new(&w.0), (self.keys_manager, self.keys_manager)).unwrap().1;
assert!(new_monitor == monitor);
- self.latest_monitor_update_id.lock().unwrap().insert(funding_txo.to_channel_id(),
+ self.latest_monitor_update_id.lock().unwrap().insert(monitor.channel_id(),
(funding_txo, monitor.get_latest_update_id(), MonitorUpdateId::from_new_monitor(&monitor)));
self.added_monitors.lock().unwrap().push((funding_txo, monitor));
self.chain_monitor.watch_channel(funding_txo, new_monitor)
update.write(&mut w).unwrap();
assert!(channelmonitor::ChannelMonitorUpdate::read(
&mut io::Cursor::new(&w.0)).unwrap() == *update);
+ let channel_id = update.channel_id.unwrap_or(ChannelId::v1_from_funding_outpoint(funding_txo));
- self.monitor_updates.lock().unwrap().entry(funding_txo.to_channel_id()).or_insert(Vec::new()).push(update.clone());
+ self.monitor_updates.lock().unwrap().entry(channel_id).or_insert(Vec::new()).push(update.clone());
if let Some(exp) = self.expect_channel_force_closed.lock().unwrap().take() {
- assert_eq!(funding_txo.to_channel_id(), exp.0);
+ assert_eq!(channel_id, exp.0);
assert_eq!(update.updates.len(), 1);
if let channelmonitor::ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast } = update.updates[0] {
assert_eq!(should_broadcast, exp.1);
} else { panic!(); }
}
- self.latest_monitor_update_id.lock().unwrap().insert(funding_txo.to_channel_id(),
+ self.latest_monitor_update_id.lock().unwrap().insert(channel_id,
(funding_txo, update.update_id, MonitorUpdateId::from_monitor_update(update)));
let update_res = self.chain_monitor.update_channel(funding_txo, update);
// At every point where we get a monitor update, we should be able to send a useful monitor
let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<TestChannelSigner>)>::read(
&mut io::Cursor::new(&w.0), (self.keys_manager, self.keys_manager)).unwrap().1;
if let Some(chan_id) = self.expect_monitor_round_trip_fail.lock().unwrap().take() {
- assert_eq!(chan_id, funding_txo.to_channel_id());
+ assert_eq!(chan_id, channel_id);
assert!(new_monitor != *monitor);
} else {
assert!(new_monitor == *monitor);
update_res
}
- fn release_pending_monitor_events(&self) -> Vec<(OutPoint, Vec<MonitorEvent>, Option<PublicKey>)> {
+ fn release_pending_monitor_events(&self) -> Vec<(OutPoint, ChannelId, Vec<MonitorEvent>, Option<PublicKey>)> {
return self.chain_monitor.release_pending_monitor_events();
}
}
+#[cfg(test)]
struct JusticeTxData {
justice_tx: Transaction,
value: u64,
commitment_number: u64,
}
+#[cfg(test)]
pub(crate) struct WatchtowerPersister {
persister: TestPersister,
/// Upon a new commitment_signed, we'll get a
destination_script: ScriptBuf,
}
+#[cfg(test)]
impl WatchtowerPersister {
#[cfg(test)]
pub(crate) fn new(destination_script: ScriptBuf) -> Self {
WatchtowerPersister {
persister: TestPersister::new(),
- unsigned_justice_tx_data: Mutex::new(HashMap::new()),
- watchtower_state: Mutex::new(HashMap::new()),
+ unsigned_justice_tx_data: Mutex::new(new_hash_map()),
+ watchtower_state: Mutex::new(new_hash_map()),
destination_script,
}
}
}
}
+#[cfg(test)]
impl<Signer: sign::ecdsa::WriteableEcdsaChannelSigner> chainmonitor::Persist<Signer> for WatchtowerPersister {
fn persist_new_channel(&self, funding_txo: OutPoint,
data: &channelmonitor::ChannelMonitor<Signer>, id: MonitorUpdateId
assert!(self.unsigned_justice_tx_data.lock().unwrap()
.insert(funding_txo, VecDeque::new()).is_none());
assert!(self.watchtower_state.lock().unwrap()
- .insert(funding_txo, HashMap::new()).is_none());
+ .insert(funding_txo, new_hash_map()).is_none());
let initial_counterparty_commitment_tx = data.initial_counterparty_commitment_tx()
.expect("First and only call expects Some");
}
res
}
+
+ fn archive_persisted_channel(&self, funding_txo: OutPoint) {
+ <TestPersister as chainmonitor::Persist<TestChannelSigner>>::archive_persisted_channel(&self.persister, funding_txo);
+ }
}
pub struct TestPersister {
pub fn new() -> Self {
Self {
update_rets: Mutex::new(VecDeque::new()),
- chain_sync_monitor_persistences: Mutex::new(HashMap::new()),
- offchain_monitor_updates: Mutex::new(HashMap::new()),
+ chain_sync_monitor_persistences: Mutex::new(new_hash_map()),
+ offchain_monitor_updates: Mutex::new(new_hash_map()),
}
}
}
let is_chain_sync = if let UpdateOrigin::ChainSync(_) = update_id.contents { true } else { false };
if is_chain_sync {
- self.chain_sync_monitor_persistences.lock().unwrap().entry(funding_txo).or_insert(HashSet::new()).insert(update_id);
+ self.chain_sync_monitor_persistences.lock().unwrap().entry(funding_txo).or_insert(new_hash_set()).insert(update_id);
} else {
- self.offchain_monitor_updates.lock().unwrap().entry(funding_txo).or_insert(HashSet::new()).insert(update_id);
+ self.offchain_monitor_updates.lock().unwrap().entry(funding_txo).or_insert(new_hash_set()).insert(update_id);
}
ret
}
+
+ fn archive_persisted_channel(&self, funding_txo: OutPoint) {
+		// Remove the channel from the `offchain_monitor_updates` map.
+ match self.offchain_monitor_updates.lock().unwrap().remove(&funding_txo) {
+ Some(_) => {},
+ None => {
+ // If the channel was not in the offchain_monitor_updates map, it should be in the
+ // chain_sync_monitor_persistences map.
+ assert!(self.chain_sync_monitor_persistences.lock().unwrap().remove(&funding_txo).is_some());
+ }
+ };
+ }
}
pub struct TestStore {
impl TestStore {
pub fn new(read_only: bool) -> Self {
- let persisted_bytes = Mutex::new(HashMap::new());
+ let persisted_bytes = Mutex::new(new_hash_map());
Self { persisted_bytes, read_only }
}
}
} else {
format!("{}/{}", primary_namespace, secondary_namespace)
};
- let outer_e = persisted_lock.entry(prefixed).or_insert(HashMap::new());
+ let outer_e = persisted_lock.entry(prefixed).or_insert(new_hash_map());
let mut bytes = Vec::new();
bytes.write_all(buf)?;
outer_e.insert(key.to_string(), bytes);
}
}
+unsafe impl Sync for TestStore {}
+unsafe impl Send for TestStore {}
+
pub struct TestBroadcaster {
pub txn_broadcasted: Mutex<Vec<Transaction>>,
pub blocks: Arc<Mutex<Vec<(Block, u32)>>>,
pub fn unique_txn_broadcast(&self) -> Vec<Transaction> {
let mut txn = self.txn_broadcasted.lock().unwrap().split_off(0);
- let mut seen = HashSet::new();
+ let mut seen = new_hash_set();
txn.retain(|tx| seen.insert(tx.txid()));
txn
}
TestChannelMessageHandler {
pending_events: Mutex::new(Vec::new()),
expected_recv_msgs: Mutex::new(None),
- connected_peers: Mutex::new(HashSet::new()),
+ connected_peers: Mutex::new(new_hash_set()),
message_fetch_counter: AtomicUsize::new(0),
chain_hash,
}
fn handle_stfu(&self, _their_node_id: &PublicKey, msg: &msgs::Stfu) {
self.received_msg(wire::Message::Stfu(msg.clone()));
}
+ #[cfg(splicing)]
fn handle_splice(&self, _their_node_id: &PublicKey, msg: &msgs::Splice) {
self.received_msg(wire::Message::Splice(msg.clone()));
}
+ #[cfg(splicing)]
fn handle_splice_ack(&self, _their_node_id: &PublicKey, msg: &msgs::SpliceAck) {
self.received_msg(wire::Message::SpliceAck(msg.clone()));
}
+ #[cfg(splicing)]
fn handle_splice_locked(&self, _their_node_id: &PublicKey, msg: &msgs::SpliceLocked) {
self.received_msg(wire::Message::SpliceLocked(msg.clone()));
}
TestLogger {
level: Level::Trace,
id,
- lines: Mutex::new(HashMap::new()),
- context: Mutex::new(HashMap::new()),
+ lines: Mutex::new(new_hash_map()),
+ context: Mutex::new(new_hash_map()),
}
}
pub fn enable(&mut self, level: Level) {
pub disable_revocation_policy_check: bool,
enforcement_states: Mutex<HashMap<[u8;32], Arc<Mutex<EnforcementState>>>>,
expectations: Mutex<Option<VecDeque<OnGetShutdownScriptpubkey>>>,
+ pub unavailable_signers: Mutex<HashSet<[u8; 32]>>,
}
impl EntropySource for TestKeysInterface {
fn derive_channel_signer(&self, channel_value_satoshis: u64, channel_keys_id: [u8; 32]) -> TestChannelSigner {
let keys = self.backing.derive_channel_signer(channel_value_satoshis, channel_keys_id);
let state = self.make_enforcement_state_cell(keys.commitment_seed);
- TestChannelSigner::new_with_revoked(keys, state, self.disable_revocation_policy_check)
+ let signer = TestChannelSigner::new_with_revoked(keys, state, self.disable_revocation_policy_check);
+ if self.unavailable_signers.lock().unwrap().contains(&channel_keys_id) {
+ signer.set_available(false);
+ }
+ signer
}
fn read_chan_signer(&self, buffer: &[u8]) -> Result<Self::EcdsaSigner, msgs::DecodeError> {
backing: sign::PhantomKeysManager::new(seed, now.as_secs(), now.subsec_nanos(), seed),
override_random_bytes: Mutex::new(None),
disable_revocation_policy_check: false,
- enforcement_states: Mutex::new(HashMap::new()),
+ enforcement_states: Mutex::new(new_hash_map()),
expectations: Mutex::new(None),
+ unavailable_signers: Mutex::new(new_hash_set()),
}
}
}
pub fn derive_channel_keys(&self, channel_value_satoshis: u64, id: &[u8; 32]) -> TestChannelSigner {
- let keys = self.backing.derive_channel_keys(channel_value_satoshis, id);
- let state = self.make_enforcement_state_cell(keys.commitment_seed);
- TestChannelSigner::new_with_revoked(keys, state, self.disable_revocation_policy_check)
+ self.derive_channel_signer(channel_value_satoshis, *id)
}
fn make_enforcement_state_cell(&self, commitment_seed: [u8; 32]) -> Arc<Mutex<EnforcementState>> {
chain_hash: ChainHash::using_genesis_block(network),
utxo_ret: Mutex::new(UtxoResult::Sync(Ok(TxOut { value: u64::max_value(), script_pubkey }))),
get_utxo_call_count: AtomicUsize::new(0),
- watched_txn: Mutex::new(HashSet::new()),
- watched_outputs: Mutex::new(HashSet::new()),
+ watched_txn: Mutex::new(new_hash_set()),
+ watched_outputs: Mutex::new(new_hash_set()),
}
}
+ pub fn remove_watched_txn_and_outputs(&self, outpoint: OutPoint, script_pubkey: ScriptBuf) {
+ self.watched_outputs.lock().unwrap().remove(&(outpoint, script_pubkey.clone()));
+ self.watched_txn.lock().unwrap().remove(&(outpoint.txid, script_pubkey));
+ }
}
impl UtxoLookup for TestChainSource {
fn time_passed(&mut self, _duration_since_epoch: Duration) {}
}
+#[cfg(c_bindings)]
+impl crate::routing::scoring::Score for TestScorer {}
+
impl Drop for TestScorer {
fn drop(&mut self) {
#[cfg(feature = "std")] {
/// Returns an instance corresponding to the current moment.
fn now() -> Self;
- /// Returns the amount of time elapsed since `self` was created.
- fn elapsed(&self) -> Duration;
-
/// Returns the amount of time passed between `earlier` and `self`.
fn duration_since(&self, earlier: Self) -> Duration;
-
- /// Returns the amount of time passed since the beginning of [`Time`].
- ///
- /// Used during (de-)serialization.
- fn duration_since_epoch() -> Duration;
}
/// A state in which time has no meaning.
fn duration_since(&self, _earlier: Self) -> Duration {
Duration::from_secs(0)
}
-
- fn duration_since_epoch() -> Duration {
- Duration::from_secs(0)
- }
-
- fn elapsed(&self) -> Duration {
- Duration::from_secs(0)
- }
}
impl Sub<Duration> for Eternity {
}
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
-#[cfg(not(feature = "no-std"))]
+#[cfg(feature = "std")]
pub struct MonotonicTime(std::time::Instant);
/// The amount of time to shift `Instant` forward to prevent overflow when subtracting a `Duration`
/// from `Instant::now` on some operating systems (e.g., iOS representing `Instance` as `u64`).
-#[cfg(not(feature = "no-std"))]
+#[cfg(feature = "std")]
const SHIFT: Duration = Duration::from_secs(10 * 365 * 24 * 60 * 60); // 10 years.
-#[cfg(not(feature = "no-std"))]
+#[cfg(feature = "std")]
impl Time for MonotonicTime {
fn now() -> Self {
let instant = std::time::Instant::now().checked_add(SHIFT).expect("Overflow on MonotonicTime instantiation");
let now = Self::now();
if now.0 > earlier.0 { now.0 - earlier.0 } else { Duration::from_secs(0) }
}
-
- fn duration_since_epoch() -> Duration {
- use std::time::SystemTime;
- SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap()
- }
-
- fn elapsed(&self) -> Duration {
- Self::now().0 - self.0
- }
}
-#[cfg(not(feature = "no-std"))]
+#[cfg(feature = "std")]
impl Sub<Duration> for MonotonicTime {
type Output = Self;
impl Time for SinceEpoch {
fn now() -> Self {
- Self(Self::duration_since_epoch())
+ Self(Self::ELAPSED.with(|elapsed| elapsed.get()))
}
fn duration_since(&self, earlier: Self) -> Duration {
self.0 - earlier.0
}
-
- fn duration_since_epoch() -> Duration {
- Self::ELAPSED.with(|elapsed| elapsed.get())
- }
-
- fn elapsed(&self) -> Duration {
- Self::duration_since_epoch() - self.0
- }
}
impl Sub<Duration> for SinceEpoch {
#[test]
fn time_passes_when_advanced() {
let now = SinceEpoch::now();
- assert_eq!(now.elapsed(), Duration::from_secs(0));
SinceEpoch::advance(Duration::from_secs(1));
SinceEpoch::advance(Duration::from_secs(1));
- let elapsed = now.elapsed();
let later = SinceEpoch::now();
- assert_eq!(elapsed, Duration::from_secs(2));
- assert_eq!(later - elapsed, now);
+ assert_eq!(now.0 + Duration::from_secs(2), later.0);
}
#[test]
fn time_never_passes_in_an_eternity() {
let now = Eternity::now();
- let elapsed = now.elapsed();
let later = Eternity::now();
- assert_eq!(now.elapsed(), Duration::from_secs(0));
- assert_eq!(later - elapsed, now);
- }
-
- #[test]
- #[cfg(not(feature = "no-std"))]
- fn monotonic_time_subtracts() {
- let now = super::MonotonicTime::now();
- assert!(now.elapsed() < Duration::from_secs(10));
-
- let ten_years = Duration::from_secs(10 * 365 * 24 * 60 * 60);
- let past = now - ten_years;
- assert!(past.elapsed() >= ten_years);
+ assert_eq!(later, now);
}
}
use crate::ln::msgs::MAX_VALUE_MSAT;
+#[allow(unused_imports)]
use crate::prelude::*;
+
use crate::io_extras::sink;
use core::cmp::Ordering;
use super::*;
use bitcoin::blockdata::locktime::absolute::LockTime;
- use bitcoin::blockdata::transaction::{Transaction, TxOut, TxIn, OutPoint};
- use bitcoin::blockdata::script::{ScriptBuf, Builder};
+ use bitcoin::blockdata::transaction::{TxIn, OutPoint};
+ use bitcoin::blockdata::script::Builder;
use bitcoin::hash_types::{PubkeyHash, Txid};
use bitcoin::hashes::Hash;
use bitcoin::hashes::hex::FromHex;
use core::mem;
use crate::sync::Mutex;
+#[allow(unused_imports)]
use crate::prelude::*;
#[cfg(feature = "std")]
/// Gets a [`Future`] that will get woken up with any waiters
pub(crate) fn get_future(&self) -> Future {
let mut lock = self.notify_pending.lock().unwrap();
+ let mut self_idx = 0;
if let Some(existing_state) = &lock.1 {
- if existing_state.lock().unwrap().callbacks_made {
+ let mut locked = existing_state.lock().unwrap();
+ if locked.callbacks_made {
// If the existing `FutureState` has completed and actually made callbacks,
// consider the notification flag to have been cleared and reset the future state.
+ mem::drop(locked);
lock.1.take();
lock.0 = false;
+ } else {
+ self_idx = locked.next_idx;
+ locked.next_idx += 1;
}
}
if let Some(existing_state) = &lock.1 {
- Future { state: Arc::clone(&existing_state) }
+ Future { state: Arc::clone(&existing_state), self_idx }
} else {
let state = Arc::new(Mutex::new(FutureState {
callbacks: Vec::new(),
+ std_future_callbacks: Vec::new(),
callbacks_with_state: Vec::new(),
complete: lock.0,
callbacks_made: false,
+ next_idx: 1,
}));
lock.1 = Some(Arc::clone(&state));
- Future { state }
+ Future { state, self_idx: 0 }
}
}
define_callback!();
pub(crate) struct FutureState {
- // When we're tracking whether a callback counts as having woken the user's code, we check the
- // first bool - set to false if we're just calling a Waker, and true if we're calling an actual
- // user-provided function.
- callbacks: Vec<(bool, Box<dyn FutureCallback>)>,
- callbacks_with_state: Vec<(bool, Box<dyn Fn(&Arc<Mutex<FutureState>>) -> () + Send>)>,
+	// `callbacks` count as having woken the user's code (they are invoked directly by the user),
+	// but `std_future_callbacks` and `callbacks_with_state` do not: the former merely wakes a
+	// future (we only count it after another `poll()`), and the latter wakes a `Sleeper`, which
+	// handles setting `callbacks_made` itself.
+ callbacks: Vec<Box<dyn FutureCallback>>,
+ std_future_callbacks: Vec<(usize, StdWaker)>,
+ callbacks_with_state: Vec<Box<dyn Fn(&Arc<Mutex<FutureState>>) -> () + Send>>,
complete: bool,
callbacks_made: bool,
+ next_idx: usize,
}
fn complete_future(this: &Arc<Mutex<FutureState>>) -> bool {
let mut state_lock = this.lock().unwrap();
let state = &mut *state_lock;
- for (counts_as_call, callback) in state.callbacks.drain(..) {
+ for callback in state.callbacks.drain(..) {
callback.call();
- state.callbacks_made |= counts_as_call;
+ state.callbacks_made = true;
}
- for (counts_as_call, callback) in state.callbacks_with_state.drain(..) {
+ for (_, waker) in state.std_future_callbacks.drain(..) {
+ waker.0.wake_by_ref();
+ }
+ for callback in state.callbacks_with_state.drain(..) {
(callback)(this);
- state.callbacks_made |= counts_as_call;
}
state.complete = true;
state.callbacks_made
}
/// A simple future which can complete once, and calls some callback(s) when it does so.
-///
-/// Clones can be made and all futures cloned from the same source will complete at the same time.
-#[derive(Clone)]
pub struct Future {
state: Arc<Mutex<FutureState>>,
+ self_idx: usize,
}
impl Future {
mem::drop(state);
callback.call();
} else {
- state.callbacks.push((true, callback));
+ state.callbacks.push(callback);
}
}
/// Waits until this [`Future`] completes.
#[cfg(feature = "std")]
- pub fn wait(self) {
- Sleeper::from_single_future(self).wait();
+ pub fn wait(&self) {
+ Sleeper::from_single_future(&self).wait();
}
/// Waits until this [`Future`] completes or the given amount of time has elapsed.
///
/// Returns true if the [`Future`] completed, false if the time elapsed.
#[cfg(feature = "std")]
- pub fn wait_timeout(self, max_wait: Duration) -> bool {
- Sleeper::from_single_future(self).wait_timeout(max_wait)
+ pub fn wait_timeout(&self, max_wait: Duration) -> bool {
+ Sleeper::from_single_future(&self).wait_timeout(max_wait)
}
#[cfg(test)]
}
}
+impl Drop for Future {
+ fn drop(&mut self) {
+ self.state.lock().unwrap().std_future_callbacks.retain(|(idx, _)| *idx != self.self_idx);
+ }
+}
+
use core::task::Waker;
struct StdWaker(pub Waker);
-impl FutureCallback for StdWaker {
- fn call(&self) { self.0.wake_by_ref() }
-}
/// This is not exported to bindings users as Rust Futures aren't usable in language bindings.
impl<'a> StdFuture for Future {
Poll::Ready(())
} else {
let waker = cx.waker().clone();
- state.callbacks.push((false, Box::new(StdWaker(waker))));
+ state.std_future_callbacks.retain(|(idx, _)| *idx != self.self_idx);
+ state.std_future_callbacks.push((self.self_idx, StdWaker(waker)));
Poll::Pending
}
}
#[cfg(feature = "std")]
impl Sleeper {
/// Constructs a new sleeper from one future, allowing blocking on it.
- pub fn from_single_future(future: Future) -> Self {
- Self { notifiers: vec![future.state] }
+ pub fn from_single_future(future: &Future) -> Self {
+ Self { notifiers: vec![Arc::clone(&future.state)] }
}
/// Constructs a new sleeper from two futures, allowing blocking on both at once.
// Note that this is the common case - a ChannelManager and ChainMonitor.
- pub fn from_two_futures(fut_a: Future, fut_b: Future) -> Self {
- Self { notifiers: vec![fut_a.state, fut_b.state] }
+ pub fn from_two_futures(fut_a: &Future, fut_b: &Future) -> Self {
+ Self { notifiers: vec![Arc::clone(&fut_a.state), Arc::clone(&fut_b.state)] }
}
/// Constructs a new sleeper on many futures, allowing blocking on all at once.
pub fn new(futures: Vec<Future>) -> Self {
- Self { notifiers: futures.into_iter().map(|f| f.state).collect() }
+ Self { notifiers: futures.into_iter().map(|f| Arc::clone(&f.state)).collect() }
}
/// Prepares to go into a wait loop body, creating a condition variable which we can block on
/// and an `Arc<Mutex<Option<_>>>` which gets set to the waking `Future`'s state prior to the
*notified_fut_mtx.lock().unwrap() = Some(Arc::clone(¬ifier_mtx));
break;
}
- notifier.callbacks_with_state.push((false, Box::new(move |notifier_ref| {
+ notifier.callbacks_with_state.push(Box::new(move |notifier_ref| {
*notified_fut_ref.lock().unwrap() = Some(Arc::clone(notifier_ref));
cv_ref.notify_all();
- })));
+ }));
}
}
(cv, notified_fut_mtx)
use super::*;
use core::sync::atomic::{AtomicBool, Ordering};
use core::future::Future as FutureTrait;
- use core::task::{Context, Poll, RawWaker, RawWakerVTable, Waker};
+ use core::task::{RawWaker, RawWakerVTable};
#[test]
fn notifier_pre_notified_future() {
// Wait on the other thread to finish its sleep, note that the leak only happened if we
// actually have to sleep here, not if we immediately return.
- Sleeper::from_two_futures(future_a, future_b).wait();
+ Sleeper::from_two_futures(&future_a, &future_b).wait();
join_handle.join().unwrap();
// then drop the notifiers and make sure the future states are gone.
mem::drop(notifier_a);
mem::drop(notifier_b);
+ mem::drop(future_a);
+ mem::drop(future_b);
assert!(future_state_a.upgrade().is_none() && future_state_b.upgrade().is_none());
}
let future = Future {
state: Arc::new(Mutex::new(FutureState {
callbacks: Vec::new(),
+ std_future_callbacks: Vec::new(),
callbacks_with_state: Vec::new(),
complete: false,
callbacks_made: false,
- }))
+ next_idx: 1,
+ })),
+ self_idx: 0,
};
let callback = Arc::new(AtomicBool::new(false));
let callback_ref = Arc::clone(&callback);
let future = Future {
state: Arc::new(Mutex::new(FutureState {
callbacks: Vec::new(),
+ std_future_callbacks: Vec::new(),
callbacks_with_state: Vec::new(),
complete: false,
callbacks_made: false,
- }))
+ next_idx: 1,
+ })),
+ self_idx: 0,
};
complete_future(&future.state);
let mut future = Future {
state: Arc::new(Mutex::new(FutureState {
callbacks: Vec::new(),
+ std_future_callbacks: Vec::new(),
callbacks_with_state: Vec::new(),
complete: false,
callbacks_made: false,
- }))
+ next_idx: 2,
+ })),
+ self_idx: 0,
};
- let mut second_future = Future { state: Arc::clone(&future.state) };
+ let mut second_future = Future { state: Arc::clone(&future.state), self_idx: 1 };
let (woken, waker) = create_waker();
assert_eq!(Pin::new(&mut future).poll(&mut Context::from_waker(&waker)), Poll::Pending);
// Set both notifiers as woken without sleeping yet.
notifier_a.notify();
notifier_b.notify();
- Sleeper::from_two_futures(notifier_a.get_future(), notifier_b.get_future()).wait();
+ Sleeper::from_two_futures(¬ifier_a.get_future(), ¬ifier_b.get_future()).wait();
// One future has woken us up, but the other should still have a pending notification.
- Sleeper::from_two_futures(notifier_a.get_future(), notifier_b.get_future()).wait();
+ Sleeper::from_two_futures(¬ifier_a.get_future(), ¬ifier_b.get_future()).wait();
// However once we've slept twice, we should no longer have any pending notifications
- assert!(!Sleeper::from_two_futures(notifier_a.get_future(), notifier_b.get_future())
+ assert!(!Sleeper::from_two_futures(¬ifier_a.get_future(), ¬ifier_b.get_future())
.wait_timeout(Duration::from_millis(10)));
// Test ordering somewhat more.
notifier_a.notify();
- Sleeper::from_two_futures(notifier_a.get_future(), notifier_b.get_future()).wait();
+ Sleeper::from_two_futures(¬ifier_a.get_future(), ¬ifier_b.get_future()).wait();
}
#[test]
// After sleeping one future (not guaranteed which one, however) will have its notification
// bit cleared.
- Sleeper::from_two_futures(notifier_a.get_future(), notifier_b.get_future()).wait();
+ Sleeper::from_two_futures(¬ifier_a.get_future(), ¬ifier_b.get_future()).wait();
// By registering a callback on the futures for both notifiers, one will complete
// immediately, but one will remain tied to the notifier, and will complete once the
notifier_b.notify();
assert!(callback_a.load(Ordering::SeqCst) && callback_b.load(Ordering::SeqCst));
- Sleeper::from_two_futures(notifier_a.get_future(), notifier_b.get_future()).wait();
- assert!(!Sleeper::from_two_futures(notifier_a.get_future(), notifier_b.get_future())
+ Sleeper::from_two_futures(&notifier_a.get_future(), &notifier_b.get_future()).wait();
+ assert!(!Sleeper::from_two_futures(&notifier_a.get_future(), &notifier_b.get_future())
.wait_timeout(Duration::from_millis(10)));
}
+
+ #[test]
+ #[cfg(feature = "std")]
+ fn multi_poll_stores_single_waker() {
+ // When a `Future` is `poll()`ed multiple times, only the last `Waker` should be called,
+ // but previously we'd store all `Waker`s until they're all woken at once. This tests a few
+ // cases to ensure `Future`s avoid storing an endless set of `Waker`s.
+ let notifier = Notifier::new();
+ let future_state = Arc::clone(&notifier.get_future().state);
+ assert_eq!(future_state.lock().unwrap().std_future_callbacks.len(), 0);
+
+ // Test that simply polling a future twice doesn't result in two pending `Waker`s.
+ let mut future_a = notifier.get_future();
+ assert_eq!(Pin::new(&mut future_a).poll(&mut Context::from_waker(&create_waker().1)), Poll::Pending);
+ assert_eq!(future_state.lock().unwrap().std_future_callbacks.len(), 1);
+ assert_eq!(Pin::new(&mut future_a).poll(&mut Context::from_waker(&create_waker().1)), Poll::Pending);
+ assert_eq!(future_state.lock().unwrap().std_future_callbacks.len(), 1);
+
+ // If we poll a second future, however, that will store a second `Waker`.
+ let mut future_b = notifier.get_future();
+ assert_eq!(Pin::new(&mut future_b).poll(&mut Context::from_waker(&create_waker().1)), Poll::Pending);
+ assert_eq!(future_state.lock().unwrap().std_future_callbacks.len(), 2);
+
+ // but when we drop the `Future`s, the pending Wakers will also be dropped.
+ mem::drop(future_a);
+ assert_eq!(future_state.lock().unwrap().std_future_callbacks.len(), 1);
+ mem::drop(future_b);
+ assert_eq!(future_state.lock().unwrap().std_future_callbacks.len(), 0);
+
+ // Further, after polling a future twice, if the notifier is woken all Wakers are dropped.
+ let mut future_a = notifier.get_future();
+ assert_eq!(Pin::new(&mut future_a).poll(&mut Context::from_waker(&create_waker().1)), Poll::Pending);
+ assert_eq!(future_state.lock().unwrap().std_future_callbacks.len(), 1);
+ assert_eq!(Pin::new(&mut future_a).poll(&mut Context::from_waker(&create_waker().1)), Poll::Pending);
+ assert_eq!(future_state.lock().unwrap().std_future_callbacks.len(), 1);
+ notifier.notify();
+ assert_eq!(future_state.lock().unwrap().std_future_callbacks.len(), 0);
+ assert_eq!(Pin::new(&mut future_a).poll(&mut Context::from_waker(&create_waker().1)), Poll::Ready(()));
+ assert_eq!(future_state.lock().unwrap().std_future_callbacks.len(), 0);
+ }
}
# Obviously lightning-transaction-sync doesn't support no-std, but it should build
# even if lightning is built with no-std.
lightning-transaction-sync = { path = "../lightning-transaction-sync", optional = true }
+
+[patch.crates-io]
+possiblyrandom = { path = "../possiblyrandom" }
--- /dev/null
+## Bug Fixes
+
+* LDK previously would fail to forward an intermediate blinded payment
+ if the blinded hop features were absent, potentially breaking
+ interoperability.
--- /dev/null
+## Bug fixes
+
+* LDK previously serialized `PaymentRelay::fee_base_msat` as a u32 when it
+ should have been serialized as a tu32. Similarly, we were serializing
+ `PaymentConstraints::htlc_minimum_msat` as a u64 when we should have been
+ serializing it as tu64. This caused lack of interoperability when using other
+ implementations as forwarding nodes along blinded payment paths.
--- /dev/null
+[package]
+name = "possiblyrandom"
+version = "0.1.0"
+authors = ["Matt Corallo"]
+license = "MIT OR Apache-2.0"
+repository = "https://github.com/lightningdevkit/rust-lightning/"
+description = """
+A crate that wraps getrandom and always compiles, returning 0s when no randomness is available.
+"""
+edition = "2021"
+
+[package.metadata.docs.rs]
+all-features = true
+rustdoc-args = ["--cfg", "docsrs"]
+
+[dependencies]
+getrandom = { version = "0.2", optional = true, default-features = false }
+
+# Enable getrandom if we are on a platform that (likely) supports it
+[target.'cfg(not(any(target_os = "unknown", target_os = "none")))'.dependencies]
+getrandom = { version = "0.2", default-features = false }
--- /dev/null
+// This file is Copyright its original authors, visible in version control
+// history.
+//
+// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
+// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
+// You may not use this file except in accordance with one or both of these
+// licenses.
+
+//! [`getrandom`] provides access to OS randomness, but will fail to compile on platforms that do
+//! not support fetching OS randomness. This is exactly what you want when you're doing
+//! cryptographic operations, but when you're just opportunistically randomizing, we're fine with
+//! compiling and simply disabling randomization.
+//!
+//! This crate does that, returning only possibly-random data.
+//!
+//! Note that this crate only enables getrandom on a subset of platforms it supports. As getrandom
+//! evolves this crate is unlikely to carefully track all getrandom-supported platforms, however
+//! will use random data on popular platforms.
+
+#![no_std]
+
+#[cfg(feature = "getrandom")]
+extern crate getrandom;
+
+/// Possibly fills `dest` with random data. May fill it with zeros.
+#[inline]
+pub fn getpossiblyrandom(dest: &mut [u8]) {
+ #[cfg(feature = "getrandom")]
+ if getrandom::getrandom(dest).is_err() {
+ dest.fill(0);
+ }
+ #[cfg(not(feature = "getrandom"))]
+ dest.fill(0);
+}
-disable_all_formatting = true
\ No newline at end of file
+use_small_heuristics = "Max"
+fn_args_layout = "Compressed"
+hard_tabs = true
+use_field_init_shorthand = true
+max_width = 100
+match_block_trailing_comma = true
+# UNSTABLE: format_code_in_doc_comments = true
+# UNSTABLE: overflow_delimited_expr = true
+# UNSTABLE: comment_width = 100
+# UNSTABLE: format_macro_matchers = true
+# UNSTABLE: format_strings = true
+# UNSTABLE: group_imports = "StdExternalCrate"
--- /dev/null
+./bench/benches/bench.rs
+./fuzz/src/base32.rs
+./fuzz/src/bech32_parse.rs
+./fuzz/src/bin/base32_target.rs
+./fuzz/src/bin/bech32_parse_target.rs
+./fuzz/src/bin/chanmon_consistency_target.rs
+./fuzz/src/bin/chanmon_deser_target.rs
+./fuzz/src/bin/fromstr_to_netaddress_target.rs
+./fuzz/src/bin/full_stack_target.rs
+./fuzz/src/bin/indexedmap_target.rs
+./fuzz/src/bin/invoice_deser_target.rs
+./fuzz/src/bin/invoice_request_deser_target.rs
+./fuzz/src/bin/msg_accept_channel_target.rs
+./fuzz/src/bin/msg_accept_channel_v2_target.rs
+./fuzz/src/bin/msg_announcement_signatures_target.rs
+./fuzz/src/bin/msg_channel_announcement_target.rs
+./fuzz/src/bin/msg_channel_details_target.rs
+./fuzz/src/bin/msg_channel_ready_target.rs
+./fuzz/src/bin/msg_channel_reestablish_target.rs
+./fuzz/src/bin/msg_channel_update_target.rs
+./fuzz/src/bin/msg_closing_signed_target.rs
+./fuzz/src/bin/msg_commitment_signed_target.rs
+./fuzz/src/bin/msg_decoded_onion_error_packet_target.rs
+./fuzz/src/bin/msg_error_message_target.rs
+./fuzz/src/bin/msg_funding_created_target.rs
+./fuzz/src/bin/msg_funding_signed_target.rs
+./fuzz/src/bin/msg_gossip_timestamp_filter_target.rs
+./fuzz/src/bin/msg_init_target.rs
+./fuzz/src/bin/msg_node_announcement_target.rs
+./fuzz/src/bin/msg_open_channel_target.rs
+./fuzz/src/bin/msg_open_channel_v2_target.rs
+./fuzz/src/bin/msg_ping_target.rs
+./fuzz/src/bin/msg_pong_target.rs
+./fuzz/src/bin/msg_query_channel_range_target.rs
+./fuzz/src/bin/msg_query_short_channel_ids_target.rs
+./fuzz/src/bin/msg_reply_channel_range_target.rs
+./fuzz/src/bin/msg_reply_short_channel_ids_end_target.rs
+./fuzz/src/bin/msg_revoke_and_ack_target.rs
+./fuzz/src/bin/msg_shutdown_target.rs
+./fuzz/src/bin/msg_splice_ack_target.rs
+./fuzz/src/bin/msg_splice_locked_target.rs
+./fuzz/src/bin/msg_splice_target.rs
+./fuzz/src/bin/msg_stfu_target.rs
+./fuzz/src/bin/msg_tx_abort_target.rs
+./fuzz/src/bin/msg_tx_ack_rbf_target.rs
+./fuzz/src/bin/msg_tx_add_input_target.rs
+./fuzz/src/bin/msg_tx_add_output_target.rs
+./fuzz/src/bin/msg_tx_complete_target.rs
+./fuzz/src/bin/msg_tx_init_rbf_target.rs
+./fuzz/src/bin/msg_tx_remove_input_target.rs
+./fuzz/src/bin/msg_tx_remove_output_target.rs
+./fuzz/src/bin/msg_tx_signatures_target.rs
+./fuzz/src/bin/msg_update_add_htlc_target.rs
+./fuzz/src/bin/msg_update_fail_htlc_target.rs
+./fuzz/src/bin/msg_update_fail_malformed_htlc_target.rs
+./fuzz/src/bin/msg_update_fee_target.rs
+./fuzz/src/bin/msg_update_fulfill_htlc_target.rs
+./fuzz/src/bin/offer_deser_target.rs
+./fuzz/src/bin/onion_hop_data_target.rs
+./fuzz/src/bin/onion_message_target.rs
+./fuzz/src/bin/peer_crypt_target.rs
+./fuzz/src/bin/process_network_graph_target.rs
+./fuzz/src/bin/refund_deser_target.rs
+./fuzz/src/bin/router_target.rs
+./fuzz/src/bin/zbase32_target.rs
+./fuzz/src/chanmon_consistency.rs
+./fuzz/src/chanmon_deser.rs
+./fuzz/src/fromstr_to_netaddress.rs
+./fuzz/src/full_stack.rs
+./fuzz/src/indexedmap.rs
+./fuzz/src/invoice_deser.rs
+./fuzz/src/invoice_request_deser.rs
+./fuzz/src/lib.rs
+./fuzz/src/msg_targets/mod.rs
+./fuzz/src/msg_targets/msg_accept_channel.rs
+./fuzz/src/msg_targets/msg_accept_channel_v2.rs
+./fuzz/src/msg_targets/msg_announcement_signatures.rs
+./fuzz/src/msg_targets/msg_channel_announcement.rs
+./fuzz/src/msg_targets/msg_channel_details.rs
+./fuzz/src/msg_targets/msg_channel_ready.rs
+./fuzz/src/msg_targets/msg_channel_reestablish.rs
+./fuzz/src/msg_targets/msg_channel_update.rs
+./fuzz/src/msg_targets/msg_closing_signed.rs
+./fuzz/src/msg_targets/msg_commitment_signed.rs
+./fuzz/src/msg_targets/msg_decoded_onion_error_packet.rs
+./fuzz/src/msg_targets/msg_error_message.rs
+./fuzz/src/msg_targets/msg_funding_created.rs
+./fuzz/src/msg_targets/msg_funding_signed.rs
+./fuzz/src/msg_targets/msg_gossip_timestamp_filter.rs
+./fuzz/src/msg_targets/msg_init.rs
+./fuzz/src/msg_targets/msg_node_announcement.rs
+./fuzz/src/msg_targets/msg_open_channel.rs
+./fuzz/src/msg_targets/msg_open_channel_v2.rs
+./fuzz/src/msg_targets/msg_ping.rs
+./fuzz/src/msg_targets/msg_pong.rs
+./fuzz/src/msg_targets/msg_query_channel_range.rs
+./fuzz/src/msg_targets/msg_query_short_channel_ids.rs
+./fuzz/src/msg_targets/msg_reply_channel_range.rs
+./fuzz/src/msg_targets/msg_reply_short_channel_ids_end.rs
+./fuzz/src/msg_targets/msg_revoke_and_ack.rs
+./fuzz/src/msg_targets/msg_shutdown.rs
+./fuzz/src/msg_targets/msg_splice.rs
+./fuzz/src/msg_targets/msg_splice_ack.rs
+./fuzz/src/msg_targets/msg_splice_locked.rs
+./fuzz/src/msg_targets/msg_stfu.rs
+./fuzz/src/msg_targets/msg_tx_abort.rs
+./fuzz/src/msg_targets/msg_tx_ack_rbf.rs
+./fuzz/src/msg_targets/msg_tx_add_input.rs
+./fuzz/src/msg_targets/msg_tx_add_output.rs
+./fuzz/src/msg_targets/msg_tx_complete.rs
+./fuzz/src/msg_targets/msg_tx_init_rbf.rs
+./fuzz/src/msg_targets/msg_tx_remove_input.rs
+./fuzz/src/msg_targets/msg_tx_remove_output.rs
+./fuzz/src/msg_targets/msg_tx_signatures.rs
+./fuzz/src/msg_targets/msg_update_add_htlc.rs
+./fuzz/src/msg_targets/msg_update_fail_htlc.rs
+./fuzz/src/msg_targets/msg_update_fail_malformed_htlc.rs
+./fuzz/src/msg_targets/msg_update_fee.rs
+./fuzz/src/msg_targets/msg_update_fulfill_htlc.rs
+./fuzz/src/msg_targets/msg_warning_message.rs
+./fuzz/src/msg_targets/utils.rs
+./fuzz/src/offer_deser.rs
+./fuzz/src/onion_hop_data.rs
+./fuzz/src/onion_message.rs
+./fuzz/src/peer_crypt.rs
+./fuzz/src/process_network_graph.rs
+./fuzz/src/refund_deser.rs
+./fuzz/src/router.rs
+./fuzz/src/utils/mod.rs
+./fuzz/src/utils/test_logger.rs
+./fuzz/src/utils/test_persister.rs
+./fuzz/src/zbase32.rs
+./lightning-background-processor/src/lib.rs
+./lightning-block-sync/src/convert.rs
+./lightning-block-sync/src/gossip.rs
+./lightning-block-sync/src/http.rs
+./lightning-block-sync/src/init.rs
+./lightning-block-sync/src/lib.rs
+./lightning-block-sync/src/poll.rs
+./lightning-block-sync/src/rest.rs
+./lightning-block-sync/src/rpc.rs
+./lightning-block-sync/src/test_utils.rs
+./lightning-block-sync/src/utils.rs
+./lightning-custom-message/src/lib.rs
+./lightning-invoice/fuzz/fuzz_targets/serde_data_part.rs
+./lightning-invoice/src/de.rs
+./lightning-invoice/src/lib.rs
+./lightning-invoice/src/payment.rs
+./lightning-invoice/src/ser.rs
+./lightning-invoice/src/tb.rs
+./lightning-invoice/src/utils.rs
+./lightning-invoice/tests/ser_de.rs
+./lightning-net-tokio/src/lib.rs
+./lightning-persister/src/fs_store.rs
+./lightning-persister/src/lib.rs
+./lightning-persister/src/test_utils.rs
+./lightning-persister/src/utils.rs
+./lightning-rapid-gossip-sync/src/error.rs
+./lightning-rapid-gossip-sync/src/lib.rs
+./lightning-rapid-gossip-sync/src/processing.rs
+./lightning-transaction-sync/src/common.rs
+./lightning-transaction-sync/src/electrum.rs
+./lightning-transaction-sync/src/error.rs
+./lightning-transaction-sync/src/esplora.rs
+./lightning-transaction-sync/src/lib.rs
+./lightning-transaction-sync/tests/integration_tests.rs
+./lightning/src/blinded_path/message.rs
+./lightning/src/blinded_path/mod.rs
+./lightning/src/blinded_path/payment.rs
+./lightning/src/blinded_path/utils.rs
+./lightning/src/chain/chaininterface.rs
+./lightning/src/chain/chainmonitor.rs
+./lightning/src/chain/channelmonitor.rs
+./lightning/src/chain/mod.rs
+./lightning/src/chain/onchaintx.rs
+./lightning/src/chain/package.rs
+./lightning/src/chain/transaction.rs
+./lightning/src/crypto/chacha20.rs
+./lightning/src/crypto/chacha20poly1305rfc.rs
+./lightning/src/crypto/mod.rs
+./lightning/src/crypto/poly1305.rs
+./lightning/src/crypto/streams.rs
+./lightning/src/crypto/utils.rs
+./lightning/src/events/bump_transaction.rs
+./lightning/src/events/mod.rs
+./lightning/src/lib.rs
+./lightning/src/ln/async_signer_tests.rs
+./lightning/src/ln/blinded_payment_tests.rs
+./lightning/src/ln/chan_utils.rs
+./lightning/src/ln/chanmon_update_fail_tests.rs
+./lightning/src/ln/channel.rs
+./lightning/src/ln/channel_id.rs
+./lightning/src/ln/channelmanager.rs
+./lightning/src/ln/features.rs
+./lightning/src/ln/functional_test_utils.rs
+./lightning/src/ln/functional_tests.rs
+./lightning/src/ln/inbound_payment.rs
+./lightning/src/ln/mod.rs
+./lightning/src/ln/monitor_tests.rs
+./lightning/src/ln/msgs.rs
+./lightning/src/ln/offers_tests.rs
+./lightning/src/ln/onion_payment.rs
+./lightning/src/ln/onion_route_tests.rs
+./lightning/src/ln/outbound_payment.rs
+./lightning/src/ln/payment_tests.rs
+./lightning/src/ln/peer_channel_encryptor.rs
+./lightning/src/ln/peer_handler.rs
+./lightning/src/ln/priv_short_conf_tests.rs
+./lightning/src/ln/reload_tests.rs
+./lightning/src/ln/reorg_tests.rs
+./lightning/src/ln/script.rs
+./lightning/src/ln/shutdown_tests.rs
+./lightning/src/ln/wire.rs
+./lightning/src/offers/invoice.rs
+./lightning/src/offers/invoice_error.rs
+./lightning/src/offers/invoice_request.rs
+./lightning/src/offers/merkle.rs
+./lightning/src/offers/mod.rs
+./lightning/src/offers/offer.rs
+./lightning/src/offers/parse.rs
+./lightning/src/offers/payer.rs
+./lightning/src/offers/refund.rs
+./lightning/src/offers/signer.rs
+./lightning/src/offers/test_utils.rs
+./lightning/src/onion_message/functional_tests.rs
+./lightning/src/onion_message/messenger.rs
+./lightning/src/onion_message/mod.rs
+./lightning/src/onion_message/offers.rs
+./lightning/src/onion_message/packet.rs
+./lightning/src/routing/gossip.rs
+./lightning/src/routing/mod.rs
+./lightning/src/routing/router.rs
+./lightning/src/routing/scoring.rs
+./lightning/src/routing/test_utils.rs
+./lightning/src/routing/utxo.rs
+./lightning/src/sync/debug_sync.rs
+./lightning/src/sync/fairrwlock.rs
+./lightning/src/sync/mod.rs
+./lightning/src/sync/nostd_sync.rs
+./lightning/src/sync/test_lockorder_checks.rs
+./lightning/src/util/atomic_counter.rs
+./lightning/src/util/base32.rs
+./lightning/src/util/byte_utils.rs
+./lightning/src/util/config.rs
+./lightning/src/util/errors.rs
+./lightning/src/util/fuzz_wrappers.rs
+./lightning/src/util/indexed_map.rs
+./lightning/src/util/invoice.rs
+./lightning/src/util/logger.rs
+./lightning/src/util/macro_logger.rs
+./lightning/src/util/message_signing.rs
+./lightning/src/util/mod.rs
+./lightning/src/util/persist.rs
+./lightning/src/util/scid_utils.rs
+./lightning/src/util/ser.rs
+./lightning/src/util/ser_macros.rs
+./lightning/src/util/string.rs
+./lightning/src/util/test_channel_signer.rs
+./lightning/src/util/test_utils.rs
+./lightning/src/util/time.rs
+./lightning/src/util/transaction_utils.rs
+./lightning/src/util/wakers.rs