- name: Pin tokio to 1.14 for Rust 1.45
if: "matrix.build-net-old-tokio"
run: cargo update -p tokio --precise "1.14.0" --verbose
+ env:
+ CARGO_NET_GIT_FETCH_WITH_CLI: "true"
- name: Build on Rust ${{ matrix.toolchain }} with net-tokio
if: "matrix.build-net-tokio && !matrix.coverage"
run: cargo build --verbose --color always
afl = { version = "0.4", optional = true }
lightning = { path = "../lightning", features = ["regex"] }
lightning-rapid-gossip-sync = { path = "../lightning-rapid-gossip-sync" }
-bitcoin = { version = "0.28.1", features = ["secp-lowmemory"] }
+bitcoin = { version = "0.29.0", features = ["secp-lowmemory"] }
hex = "0.3"
-honggfuzz = { version = "0.5", optional = true }
+honggfuzz = { version = "0.5", optional = true, default-features = false }
libfuzzer-sys = { version = "0.4", optional = true }
[build-dependencies]
[ "$(git diff)" != "" ] && exit 1
popd
-cargo install --color always --force honggfuzz
+cargo install --color always --force honggfuzz --no-default-features
sed -i 's/lto = true//' Cargo.toml
HFUZZ_BUILD_ARGS="--features honggfuzz_fuzz" cargo --color always hfuzz build
for TARGET in src/bin/*.rs; do
GEN_TEST chanmon_deser
GEN_TEST chanmon_consistency
GEN_TEST full_stack
+GEN_TEST onion_message
GEN_TEST peer_crypt
GEN_TEST process_network_graph
GEN_TEST router
--- /dev/null
+// This file is Copyright its original authors, visible in version control
+// history.
+//
+// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
+// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
+// You may not use this file except in accordance with one or both of these
+// licenses.
+
+// This file is auto-generated by gen_target.sh based on target_template.txt
+// To modify it, modify target_template.txt and run gen_target.sh instead.
+
+#![cfg_attr(feature = "libfuzzer_fuzz", no_main)]
+
+#[cfg(not(fuzzing))]
+compile_error!("Fuzz targets need cfg=fuzzing");
+
+extern crate lightning_fuzz;
+use lightning_fuzz::onion_message::*;
+
+#[cfg(feature = "afl")]
+#[macro_use] extern crate afl;
+#[cfg(feature = "afl")]
+fn main() {
+ fuzz!(|data| {
+ onion_message_run(data.as_ptr(), data.len());
+ });
+}
+
+#[cfg(feature = "honggfuzz")]
+#[macro_use] extern crate honggfuzz;
+#[cfg(feature = "honggfuzz")]
+fn main() {
+ loop {
+ fuzz!(|data| {
+ onion_message_run(data.as_ptr(), data.len());
+ });
+ }
+}
+
+#[cfg(feature = "libfuzzer_fuzz")]
+#[macro_use] extern crate libfuzzer_sys;
+#[cfg(feature = "libfuzzer_fuzz")]
+fuzz_target!(|data: &[u8]| {
+ onion_message_run(data.as_ptr(), data.len());
+});
+
+#[cfg(feature = "stdin_fuzz")]
+fn main() {
+ use std::io::Read;
+
+ let mut data = Vec::with_capacity(8192);
+ std::io::stdin().read_to_end(&mut data).unwrap();
+ onion_message_run(data.as_ptr(), data.len());
+}
+
+#[test]
+fn run_test_cases() {
+ use std::fs;
+ use std::io::Read;
+ use lightning_fuzz::utils::test_logger::StringBuffer;
+
+ use std::sync::{atomic, Arc};
+ {
+ let data: Vec<u8> = vec![0];
+ onion_message_run(data.as_ptr(), data.len());
+ }
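+ // Run each corpus entry from `test_cases/onion_message` in its own thread,
+ // capping concurrency at 32; the main thread parks until a finished worker
+ // unparks it.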
+ let mut threads = Vec::new();
+ let threads_running = Arc::new(atomic::AtomicUsize::new(0));
+ if let Ok(tests) = fs::read_dir("test_cases/onion_message") {
+ for test in tests {
+ let mut data: Vec<u8> = Vec::new();
+ let path = test.unwrap().path();
+ fs::File::open(&path).unwrap().read_to_end(&mut data).unwrap();
+ threads_running.fetch_add(1, atomic::Ordering::AcqRel);
+
+ let thread_count_ref = Arc::clone(&threads_running);
+ let main_thread_ref = std::thread::current();
+ threads.push((path.file_name().unwrap().to_str().unwrap().to_string(),
+ std::thread::spawn(move || {
+ let string_logger = StringBuffer::new();
+
+ let panic_logger = string_logger.clone();
+ let res = if ::std::panic::catch_unwind(move || {
+ onion_message_test(&data, panic_logger);
+ }).is_err() {
+ Some(string_logger.into_string())
+ } else { None };
+ thread_count_ref.fetch_sub(1, atomic::Ordering::AcqRel);
+ main_thread_ref.unpark();
+ res
+ })
+ ));
+ while threads_running.load(atomic::Ordering::Acquire) > 32 {
+ std::thread::park();
+ }
+ }
+ }
+ let mut failed_outputs = Vec::new();
+ for (test, thread) in threads.drain(..) {
+ if let Some(output) = thread.join().unwrap() {
+ println!("\nOutput of {}:\n{}\n", test, output);
+ failed_outputs.push(test);
+ }
+ }
+ if !failed_outputs.is_empty() {
+ println!("Test cases which failed: ");
+ for case in failed_outputs {
+ println!("{}", case);
+ }
+ panic!();
+ }
+}
//! send-side handling is correct, other peers. We consider it a failure if any action results in a
//! channel being force-closed.
+use bitcoin::TxMerkleNode;
use bitcoin::blockdata::block::BlockHeader;
use bitcoin::blockdata::constants::genesis_block;
use bitcoin::blockdata::transaction::{Transaction, TxOut};
use bitcoin::blockdata::script::{Builder, Script};
use bitcoin::blockdata::opcodes;
+use bitcoin::blockdata::locktime::PackedLockTime;
use bitcoin::network::constants::Network;
use bitcoin::hashes::Hash as TraitImport;
use utils::test_logger::{self, Output};
use utils::test_persister::TestPersister;
-use bitcoin::secp256k1::{PublicKey,SecretKey};
+use bitcoin::secp256k1::{PublicKey, SecretKey, Scalar};
use bitcoin::secp256k1::ecdh::SharedSecret;
use bitcoin::secp256k1::ecdsa::RecoverableSignature;
use bitcoin::secp256k1::Secp256k1;
Ok(SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, self.node_id]).unwrap())
}
- fn ecdh(&self, recipient: Recipient, other_key: &PublicKey, tweak: Option<&[u8; 32]>) -> Result<SharedSecret, ()> {
+ fn ecdh(&self, recipient: Recipient, other_key: &PublicKey, tweak: Option<&Scalar>) -> Result<SharedSecret, ()> {
let mut node_secret = self.get_node_secret(recipient)?;
if let Some(tweak) = tweak {
- node_secret.mul_assign(tweak).map_err(|_| ())?;
+ node_secret = node_secret.mul_tweak(tweak).unwrap();
}
Ok(SharedSecret::new(other_key, &node_secret))
}
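// Note on the API change above: the secp256k1 bump replaces
// `SecretKey::mul_assign(&[u8; 32])`, which mutated the key in place, with
// `mul_tweak(&Scalar)`, which consumes the key and returns the tweaked one.
// A minimal sketch, assuming some valid `tweak: Scalar`:
//
//     let tweaked: SecretKey = node_secret.mul_tweak(&tweak).map_err(|_| ())?;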
let events = $source.get_and_clear_pending_events();
assert_eq!(events.len(), 1);
if let events::Event::FundingGenerationReady { ref temporary_channel_id, ref channel_value_satoshis, ref output_script, .. } = events[0] {
- let tx = Transaction { version: $chan_id, lock_time: 0, input: Vec::new(), output: vec![TxOut {
+ let tx = Transaction { version: $chan_id, lock_time: PackedLockTime::ZERO, input: Vec::new(), output: vec![TxOut {
value: *channel_value_satoshis, script_pubkey: output_script.clone(),
}]};
funding_output = OutPoint { txid: tx.txid(), index: 0 };
macro_rules! confirm_txn {
($node: expr) => { {
let chain_hash = genesis_block(Network::Bitcoin).block_hash();
- let mut header = BlockHeader { version: 0x20000000, prev_blockhash: chain_hash, merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+ let mut header = BlockHeader { version: 0x20000000, prev_blockhash: chain_hash, merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
let txdata: Vec<_> = channel_txn.iter().enumerate().map(|(i, tx)| (i + 1, tx)).collect();
$node.transactions_confirmed(&header, &txdata, 1);
for _ in 2..100 {
- header = BlockHeader { version: 0x20000000, prev_blockhash: header.block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+ header = BlockHeader { version: 0x20000000, prev_blockhash: header.block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
}
$node.best_block_updated(&header, 99);
} }
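// Note on the `merkle_root` changes above: bitcoin 0.29 (via bitcoin_hashes 0.11)
// drops the `Default` impl on hash newtypes such as `TxMerkleNode` and `BlockHash`,
// so `Default::default()` is replaced with the `Hash` trait's `all_zeros()`
// constructor (the trait is in scope via `use bitcoin::hashes::Hash as TraitImport`).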
//! or payments to send/ways to handle events generated.
//! This test has been very useful, though due to its complexity good starting inputs are critical.
+use bitcoin::TxMerkleNode;
use bitcoin::blockdata::block::BlockHeader;
+use bitcoin::blockdata::constants::genesis_block;
use bitcoin::blockdata::transaction::{Transaction, TxOut};
use bitcoin::blockdata::script::{Builder, Script};
use bitcoin::blockdata::opcodes;
+use bitcoin::blockdata::locktime::PackedLockTime;
use bitcoin::consensus::encode::deserialize;
use bitcoin::network::constants::Network;
-use bitcoin::blockdata::constants::genesis_block;
use bitcoin::hashes::Hash as TraitImport;
use bitcoin::hashes::HashEngine as TraitImportEngine;
use utils::test_logger;
use utils::test_persister::TestPersister;
-use bitcoin::secp256k1::{PublicKey,SecretKey};
+use bitcoin::secp256k1::{PublicKey, SecretKey, Scalar};
use bitcoin::secp256k1::ecdh::SharedSecret;
use bitcoin::secp256k1::ecdsa::RecoverableSignature;
use bitcoin::secp256k1::Secp256k1;
EnforcingSigner,
Arc<chainmonitor::ChainMonitor<EnforcingSigner, Arc<dyn chain::Filter>, Arc<TestBroadcaster>, Arc<FuzzEstimator>, Arc<dyn Logger>, Arc<TestPersister>>>,
Arc<TestBroadcaster>, Arc<KeyProvider>, Arc<FuzzEstimator>, Arc<dyn Logger>>;
-type PeerMan<'a> = PeerManager<Peer<'a>, Arc<ChannelMan>, Arc<P2PGossipSync<Arc<NetworkGraph<Arc<dyn Logger>>>, Arc<dyn chain::Access>, Arc<dyn Logger>>>, Arc<dyn Logger>, IgnoringMessageHandler>;
+type PeerMan<'a> = PeerManager<Peer<'a>, Arc<ChannelMan>, Arc<P2PGossipSync<Arc<NetworkGraph<Arc<dyn Logger>>>, Arc<dyn chain::Access>, Arc<dyn Logger>>>, IgnoringMessageHandler, Arc<dyn Logger>, IgnoringMessageHandler>;
struct MoneyLossDetector<'a> {
manager: Arc<ChannelMan>,
}
self.blocks_connected += 1;
- let header = BlockHeader { version: 0x20000000, prev_blockhash: self.header_hashes[self.height].0, merkle_root: Default::default(), time: self.blocks_connected, bits: 42, nonce: 42 };
+ let header = BlockHeader { version: 0x20000000, prev_blockhash: self.header_hashes[self.height].0, merkle_root: TxMerkleNode::all_zeros(), time: self.blocks_connected, bits: 42, nonce: 42 };
self.height += 1;
self.manager.transactions_confirmed(&header, &txdata, self.height as u32);
self.manager.best_block_updated(&header, self.height as u32);
fn disconnect_block(&mut self) {
if self.height > 0 && (self.max_height < 6 || self.height >= self.max_height - 6) {
- let header = BlockHeader { version: 0x20000000, prev_blockhash: self.header_hashes[self.height - 1].0, merkle_root: Default::default(), time: self.header_hashes[self.height].1, bits: 42, nonce: 42 };
+ let header = BlockHeader { version: 0x20000000, prev_blockhash: self.header_hashes[self.height - 1].0, merkle_root: TxMerkleNode::all_zeros(), time: self.header_hashes[self.height].1, bits: 42, nonce: 42 };
self.manager.block_disconnected(&header, self.height as u32);
self.monitor.block_disconnected(&header, self.height as u32);
self.height -= 1;
Ok(self.node_secret.clone())
}
- fn ecdh(&self, recipient: Recipient, other_key: &PublicKey, tweak: Option<&[u8; 32]>) -> Result<SharedSecret, ()> {
+ fn ecdh(&self, recipient: Recipient, other_key: &PublicKey, tweak: Option<&Scalar>) -> Result<SharedSecret, ()> {
let mut node_secret = self.get_node_secret(recipient)?;
if let Some(tweak) = tweak {
- node_secret.mul_assign(tweak).map_err(|_| ())?;
+ node_secret = node_secret.mul_tweak(tweak).unwrap();
}
Ok(SharedSecret::new(other_key, &node_secret))
}
let mut loss_detector = MoneyLossDetector::new(&peers, channelmanager.clone(), monitor.clone(), PeerManager::new(MessageHandler {
chan_handler: channelmanager.clone(),
route_handler: gossip_sync.clone(),
+ onion_message_handler: IgnoringMessageHandler {},
}, our_network_key, &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 15, 0], Arc::clone(&logger), IgnoringMessageHandler{}));
let mut should_forward = false;
},
10 => {
'outer_loop: for funding_generation in pending_funding_generation.drain(..) {
- let mut tx = Transaction { version: 0, lock_time: 0, input: Vec::new(), output: vec![TxOut {
+ let mut tx = Transaction { version: 0, lock_time: PackedLockTime::ZERO, input: Vec::new(), output: vec![TxOut {
value: funding_generation.2, script_pubkey: funding_generation.3,
}] };
let funding_output = 'search_loop: loop {
pub mod chanmon_deser;
pub mod chanmon_consistency;
pub mod full_stack;
+pub mod onion_message;
pub mod peer_crypt;
pub mod process_network_graph;
pub mod router;
--- /dev/null
+// Imports that need to be added manually
+use bitcoin::bech32::u5;
+use bitcoin::blockdata::script::Script;
+use bitcoin::secp256k1::{PublicKey, Scalar, SecretKey};
+use bitcoin::secp256k1::ecdh::SharedSecret;
+use bitcoin::secp256k1::ecdsa::RecoverableSignature;
+
+use lightning::chain::keysinterface::{Recipient, KeyMaterial, KeysInterface};
+use lightning::ln::msgs::{self, DecodeError, OnionMessageHandler};
+use lightning::ln::script::ShutdownScript;
+use lightning::util::enforcing_trait_impls::EnforcingSigner;
+use lightning::util::logger::Logger;
+use lightning::util::ser::{Readable, Writer};
+use lightning::onion_message::OnionMessenger;
+
+use utils::test_logger;
+
+use std::io::Cursor;
+use std::sync::atomic::{AtomicU64, Ordering};
+
+#[inline]
+/// Actual fuzz test, method signature and name are fixed
+pub fn do_test<L: Logger>(data: &[u8], logger: &L) {
+ if let Ok(msg) = <msgs::OnionMessage as Readable>::read(&mut Cursor::new(data)) {
+ let mut secret_bytes = [0; 32];
+ secret_bytes[31] = 2;
+ let secret = SecretKey::from_slice(&secret_bytes).unwrap();
+ let keys_manager = KeyProvider {
+ node_secret: secret,
+ counter: AtomicU64::new(0),
+ };
+ let onion_messenger = OnionMessenger::new(&keys_manager, logger);
+ let mut pk = [2; 33]; pk[1] = 0xff;
+ let peer_node_id_not_used = PublicKey::from_slice(&pk).unwrap();
+ onion_messenger.handle_onion_message(&peer_node_id_not_used, &msg);
+ }
+}
+
+/// Method that needs to be added manually, {name}_test
+pub fn onion_message_test<Out: test_logger::Output>(data: &[u8], out: Out) {
+ let logger = test_logger::TestLogger::new("".to_owned(), out);
+ do_test(data, &logger);
+}
+
+/// Method that needs to be added manually, {name}_run
+#[no_mangle]
+pub extern "C" fn onion_message_run(data: *const u8, datalen: usize) {
+ let logger = test_logger::TestLogger::new("".to_owned(), test_logger::DevNull {});
+ do_test(unsafe { std::slice::from_raw_parts(data, datalen) }, &logger);
+}
+
+pub struct VecWriter(pub Vec<u8>);
+impl Writer for VecWriter {
+ fn write_all(&mut self, buf: &[u8]) -> Result<(), ::std::io::Error> {
+ self.0.extend_from_slice(buf);
+ Ok(())
+ }
+}
+struct KeyProvider {
+ node_secret: SecretKey,
+ counter: AtomicU64,
+}
+impl KeysInterface for KeyProvider {
+ type Signer = EnforcingSigner;
+
+ fn get_node_secret(&self, _recipient: Recipient) -> Result<SecretKey, ()> {
+ Ok(self.node_secret.clone())
+ }
+
+ fn ecdh(&self, recipient: Recipient, other_key: &PublicKey, tweak: Option<&Scalar>) -> Result<SharedSecret, ()> {
+ let mut node_secret = self.get_node_secret(recipient)?;
+ if let Some(tweak) = tweak {
+ node_secret = node_secret.mul_tweak(tweak).map_err(|_| ())?;
+ }
+ Ok(SharedSecret::new(other_key, &node_secret))
+ }
+
+ fn get_inbound_payment_key_material(&self) -> KeyMaterial { unreachable!() }
+
+ fn get_destination_script(&self) -> Script { unreachable!() }
+
+ fn get_shutdown_scriptpubkey(&self) -> ShutdownScript { unreachable!() }
+
+ fn get_channel_signer(&self, _inbound: bool, _channel_value_satoshis: u64) -> EnforcingSigner {
+ unreachable!()
+ }
+
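+ // Deterministic "randomness": derive the bytes from a monotonically
+ // increasing counter so fuzz runs stay reproducible.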
+ fn get_secure_random_bytes(&self) -> [u8; 32] {
+ let ctr = self.counter.fetch_add(1, Ordering::Relaxed);
+ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ (ctr >> 8*7) as u8, (ctr >> 8*6) as u8, (ctr >> 8*5) as u8, (ctr >> 8*4) as u8, (ctr >> 8*3) as u8, (ctr >> 8*2) as u8, (ctr >> 8*1) as u8, 14, (ctr >> 8*0) as u8]
+ }
+
+ fn read_chan_signer(&self, _data: &[u8]) -> Result<EnforcingSigner, DecodeError> { unreachable!() }
+
+ fn sign_invoice(&self, _hrp_bytes: &[u8], _invoice_data: &[u5], _recipient: Recipient) -> Result<RecoverableSignature, ()> {
+ unreachable!()
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use lightning::util::logger::{Logger, Record};
+ use std::collections::HashMap;
+ use std::sync::Mutex;
+
+ struct TrackingLogger {
+ /// (module, message) -> count
+ pub lines: Mutex<HashMap<(String, String), usize>>,
+ }
+ impl Logger for TrackingLogger {
+ fn log(&self, record: &Record) {
+ *self.lines.lock().unwrap().entry((record.module_path.to_string(), format!("{}", record.args))).or_insert(0) += 1;
+ println!("{:<5} [{} : {}, {}] {}", record.level.to_string(), record.module_path, record.file, record.line, record.args);
+ }
+ }
+
+ #[test]
+ fn test_no_onion_message_breakage() {
+ let one_hop_om = "020000000000000000000000000000000000000000000000000000000000000e01055600020000000000000000000000000000000000000000000000000000000000000e01120410950000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000009300000000000000000000000000000000000000000000000000000000000000";
+ let logger = TrackingLogger { lines: Mutex::new(HashMap::new()) };
+ super::do_test(&::hex::decode(one_hop_om).unwrap(), &logger);
+ {
+ let log_entries = logger.lines.lock().unwrap();
+ assert_eq!(log_entries.get(&("lightning::onion_message::messenger".to_string(), "Received an onion message with path_id: None and no reply_path".to_string())), Some(&1));
+ }
+
+ let two_unblinded_hops_om = "020000000000000000000000000000000000000000000000000000000000000e01055600020000000000000000000000000000000000000000000000000000000000000e0135043304210200000000000000000000000000000000000000000000000000000000000000039500000000000000000000000000000058000000000000000000000000000000000000000000000000000000000000001204105e00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000b300000000000000000000000000000000000000000000000000000000000000";
+ let logger = TrackingLogger { lines: Mutex::new(HashMap::new()) };
+ super::do_test(&::hex::decode(two_unblinded_hops_om).unwrap(), &logger);
+ {
+ let log_entries = logger.lines.lock().unwrap();
+ assert_eq!(log_entries.get(&("lightning::onion_message::messenger".to_string(), "Forwarding an onion message to peer 020000000000000000000000000000000000000000000000000000000000000003".to_string())), Some(&1));
+ }
+
+ let two_unblinded_two_blinded_om = "020000000000000000000000000000000000000000000000000000000000000e01055600020000000000000000000000000000000000000000000000000000000000000e01350433042102000000000000000000000000000000000000000000000000000000000000000395000000000000000000000000000000530000000000000000000000000000000000000000000000000000000000000058045604210200000000000000000000000000000000000000000000000000000000000000040821020000000000000000000000000000000000000000000000000000000000000e015e0000000000000000000000000000006b0000000000000000000000000000000000000000000000000000000000000035043304210200000000000000000000000000000000000000000000000000000000000000054b000000000000000000000000000000e800000000000000000000000000000000000000000000000000000000000000120410ee00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000b300000000000000000000000000000000000000000000000000000000000000";
+ let logger = TrackingLogger { lines: Mutex::new(HashMap::new()) };
+ super::do_test(&::hex::decode(two_unblinded_two_blinded_om).unwrap(), &logger);
+ {
+ let log_entries = logger.lines.lock().unwrap();
+ assert_eq!(log_entries.get(&("lightning::onion_message::messenger".to_string(), "Forwarding an onion message to peer 020000000000000000000000000000000000000000000000000000000000000003".to_string())), Some(&1));
+ }
+
+ let three_blinded_om = "020000000000000000000000000000000000000000000000000000000000000e01055600020000000000000000000000000000000000000000000000000000000000000e013504330421020000000000000000000000000000000000000000000000000000000000000003950000000000000000000000000000007f0000000000000000000000000000000000000000000000000000000000000035043304210200000000000000000000000000000000000000000000000000000000000000045e0000000000000000000000000000004c000000000000000000000000000000000000000000000000000000000000001204104a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000b300000000000000000000000000000000000000000000000000000000000000";
+ let logger = TrackingLogger { lines: Mutex::new(HashMap::new()) };
+ super::do_test(&::hex::decode(three_blinded_om).unwrap(), &logger);
+ {
+ let log_entries = logger.lines.lock().unwrap();
+ assert_eq!(log_entries.get(&("lightning::onion_message::messenger".to_string(), "Forwarding an onion message to peer 020000000000000000000000000000000000000000000000000000000000000003".to_string())), Some(&1));
+ }
+ }
+}
// Imports that need to be added manually
use lightning_rapid_gossip_sync::RapidGossipSync;
+use bitcoin::hashes::Hash as TraitImport;
use utils::test_logger;
/// Actual fuzz test, method signature and name are fixed
fn do_test<Out: test_logger::Output>(data: &[u8], out: Out) {
- let block_hash = bitcoin::BlockHash::default();
+ let block_hash = bitcoin::BlockHash::all_zeros();
let logger = test_logger::TestLogger::new("".to_owned(), out);
let network_graph = lightning::routing::gossip::NetworkGraph::new(block_hash, &logger);
let rapid_sync = RapidGossipSync::new(&network_graph);
void chanmon_deser_run(const unsigned char* data, size_t data_len);
void chanmon_consistency_run(const unsigned char* data, size_t data_len);
void full_stack_run(const unsigned char* data, size_t data_len);
+void onion_message_run(const unsigned char* data, size_t data_len);
void peer_crypt_run(const unsigned char* data, size_t data_len);
void process_network_graph_run(const unsigned char* data, size_t data_len);
void router_run(const unsigned char* data, size_t data_len);
rustdoc-args = ["--cfg", "docsrs"]
[dependencies]
-bitcoin = "0.28.1"
+bitcoin = "0.29.0"
lightning = { version = "0.0.110", path = "../lightning", features = ["std"] }
lightning-rapid-gossip-sync = { version = "0.0.110", path = "../lightning-rapid-gossip-sync" }
futures = { version = "0.3", optional = true }
//! running properly, and (2) either can or should be run in the background. See docs for
//! [`BackgroundProcessor`] for more details on the nitty-gritty.
+// Prefix these with `rustdoc::` when we update our MSRV to be >= 1.52 to remove warnings.
#![deny(broken_intra_doc_links)]
+#![deny(private_intra_doc_links)]
+
#![deny(missing_docs)]
#![deny(unsafe_code)]
use lightning::chain::chainmonitor::{ChainMonitor, Persist};
use lightning::chain::keysinterface::{Sign, KeysInterface};
use lightning::ln::channelmanager::ChannelManager;
-use lightning::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler};
+use lightning::ln::msgs::{ChannelMessageHandler, OnionMessageHandler, RoutingMessageHandler};
use lightning::ln::peer_handler::{CustomMessageHandler, PeerManager, SocketDescriptor};
use lightning::routing::gossip::{NetworkGraph, P2PGossipSync};
use lightning::routing::scoring::WriteableScore;
P: 'static + Deref + Send + Sync,
Descriptor: 'static + SocketDescriptor + Send + Sync,
CMH: 'static + Deref + Send + Sync,
+ OMH: 'static + Deref + Send + Sync,
RMH: 'static + Deref + Send + Sync,
EH: 'static + EventHandler + Send,
PS: 'static + Deref + Send,
PGS: 'static + Deref<Target = P2PGossipSync<G, CA, L>> + Send + Sync,
RGS: 'static + Deref<Target = RapidGossipSync<G, L>> + Send,
UMH: 'static + Deref + Send + Sync,
- PM: 'static + Deref<Target = PeerManager<Descriptor, CMH, RMH, L, UMH>> + Send + Sync,
+ PM: 'static + Deref<Target = PeerManager<Descriptor, CMH, RMH, OMH, L, UMH>> + Send + Sync,
S: 'static + Deref<Target = SC> + Send + Sync,
SC: WriteableScore<'a>,
>(
L::Target: 'static + Logger,
P::Target: 'static + Persist<Signer>,
CMH::Target: 'static + ChannelMessageHandler,
+ OMH::Target: 'static + OnionMessageHandler,
RMH::Target: 'static + RoutingMessageHandler,
UMH::Target: 'static + CustomMessageHandler,
PS::Target: 'static + Persister<'a, Signer, CW, T, K, F, L, SC>,
mod tests {
use bitcoin::blockdata::block::BlockHeader;
use bitcoin::blockdata::constants::genesis_block;
+ use bitcoin::blockdata::locktime::PackedLockTime;
use bitcoin::blockdata::transaction::{Transaction, TxOut};
use bitcoin::network::constants::Network;
use lightning::chain::{BestBlock, Confirm, chainmonitor};
use std::sync::{Arc, Mutex};
use std::sync::mpsc::SyncSender;
use std::time::Duration;
+ use bitcoin::hashes::Hash;
+ use bitcoin::TxMerkleNode;
use lightning::routing::scoring::{FixedPenaltyScorer};
use lightning_rapid_gossip_sync::RapidGossipSync;
use super::{BackgroundProcessor, GossipSync, FRESHNESS_TIMER};
node: Arc<SimpleArcChannelManager<ChainMonitor, test_utils::TestBroadcaster, test_utils::TestFeeEstimator, test_utils::TestLogger>>,
p2p_gossip_sync: PGS,
rapid_gossip_sync: RGS,
- peer_manager: Arc<PeerManager<TestDescriptor, Arc<test_utils::TestChannelMessageHandler>, Arc<test_utils::TestRoutingMessageHandler>, Arc<test_utils::TestLogger>, IgnoringMessageHandler>>,
+ peer_manager: Arc<PeerManager<TestDescriptor, Arc<test_utils::TestChannelMessageHandler>, Arc<test_utils::TestRoutingMessageHandler>, IgnoringMessageHandler, Arc<test_utils::TestLogger>, IgnoringMessageHandler>>,
chain_monitor: Arc<ChainMonitor>,
persister: Arc<FilesystemPersister>,
tx_broadcaster: Arc<test_utils::TestBroadcaster>,
let network_graph = Arc::new(NetworkGraph::new(genesis_block.header.block_hash(), logger.clone()));
let p2p_gossip_sync = Arc::new(P2PGossipSync::new(network_graph.clone(), Some(chain_source.clone()), logger.clone()));
let rapid_gossip_sync = Arc::new(RapidGossipSync::new(network_graph.clone()));
- let msg_handler = MessageHandler { chan_handler: Arc::new(test_utils::TestChannelMessageHandler::new()), route_handler: Arc::new(test_utils::TestRoutingMessageHandler::new() )};
+ let msg_handler = MessageHandler { chan_handler: Arc::new(test_utils::TestChannelMessageHandler::new()), route_handler: Arc::new(test_utils::TestRoutingMessageHandler::new()), onion_message_handler: IgnoringMessageHandler{}};
let peer_manager = Arc::new(PeerManager::new(msg_handler, keys_manager.get_node_secret(Recipient::Node).unwrap(), &seed, logger.clone(), IgnoringMessageHandler{}));
let scorer = Arc::new(Mutex::new(test_utils::TestScorer::with_penalty(0)));
let node = Node { node: manager, p2p_gossip_sync, rapid_gossip_sync, peer_manager, chain_monitor, persister, tx_broadcaster, network_graph, logger, best_block, scorer };
assert_eq!(channel_value_satoshis, $channel_value);
assert_eq!(user_channel_id, 42);
- let tx = Transaction { version: 1 as i32, lock_time: 0, input: Vec::new(), output: vec![TxOut {
+ let tx = Transaction { version: 1 as i32, lock_time: PackedLockTime(0), input: Vec::new(), output: vec![TxOut {
value: channel_value_satoshis, script_pubkey: output_script.clone(),
}]};
(temporary_channel_id, tx)
for i in 1..=depth {
let prev_blockhash = node.best_block.block_hash();
let height = node.best_block.height() + 1;
- let header = BlockHeader { version: 0x20000000, prev_blockhash, merkle_root: Default::default(), time: height, bits: 42, nonce: 42 };
+ let header = BlockHeader { version: 0x20000000, prev_blockhash, merkle_root: TxMerkleNode::all_zeros(), time: height, bits: 42, nonce: 42 };
let txdata = vec![(0, tx)];
node.best_block = BestBlock::new(header.block_hash(), height);
match i {
rpc-client = [ "serde", "serde_json", "chunked_transfer" ]
[dependencies]
-bitcoin = "0.28.1"
+bitcoin = "0.29.0"
lightning = { version = "0.0.110", path = "../lightning" }
futures = { version = "0.3" }
tokio = { version = "1.0", features = [ "io-util", "net", "time" ], optional = true }
use std::convert::From;
use std::convert::TryFrom;
use std::convert::TryInto;
+use bitcoin::hashes::Hash;
/// Conversion from `std::io::Error` into `BlockSourceError`.
impl From<std::io::Error> for BlockSourceError {
// Add an empty previousblockhash for the genesis block.
if let None = header.get("previousblockhash") {
- let hash: BlockHash = Default::default();
+ let hash: BlockHash = BlockHash::all_zeros();
header.as_object_mut().unwrap().insert("previousblockhash".to_string(), serde_json::json!(hash.to_hex()));
}
//! Both features support either blocking I/O using `std::net::TcpStream` or, with feature `tokio`,
//! non-blocking I/O using `tokio::net::TcpStream` from inside a Tokio runtime.
+// Prefix these with `rustdoc::` when we update our MSRV to be >= 1.52 to remove warnings.
#![deny(broken_intra_doc_links)]
+#![deny(private_intra_doc_links)]
+
#![deny(missing_docs)]
#![deny(unsafe_code)]
use bitcoin::network::constants::Network;
use bitcoin::util::uint::Uint256;
use bitcoin::util::hash::bitcoin_merkle_root;
-use bitcoin::Transaction;
+use bitcoin::{PackedLockTime, Transaction};
use lightning::chain;
// but that's OK because those tests don't trigger the check.
let coinbase = Transaction {
version: 0,
- lock_time: 0,
+ lock_time: PackedLockTime::ZERO,
input: vec![],
output: vec![]
};
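// Note on the `lock_time` changes above and elsewhere in this patch: bitcoin 0.29
// wraps `Transaction::lock_time` in the `PackedLockTime` newtype, so bare integers
// become `PackedLockTime::ZERO` (or `PackedLockTime(n)` for non-zero values).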
std = ["bitcoin_hashes/std", "num-traits/std", "lightning/std", "bech32/std"]
[dependencies]
-bech32 = { version = "0.8", default-features = false }
+bech32 = { version = "0.9.0", default-features = false }
lightning = { version = "0.0.110", path = "../lightning", default-features = false }
-secp256k1 = { version = "0.22", default-features = false, features = ["recovery", "alloc"] }
+secp256k1 = { version = "0.24.0", default-features = false, features = ["recovery", "alloc"] }
num-traits = { version = "0.2.8", default-features = false }
-bitcoin_hashes = { version = "0.10", default-features = false }
+bitcoin_hashes = { version = "0.11", default-features = false }
hashbrown = { version = "0.11", optional = true }
core2 = { version = "0.3.0", default-features = false, optional = true }
serde = { version = "1.0.118", optional = true }
honggfuzz_fuzz = ["honggfuzz"]
[dependencies]
-honggfuzz = { version = "0.5", optional = true }
+honggfuzz = { version = "0.5", optional = true, default-features = false }
afl = { version = "0.4", optional = true }
lightning-invoice = { path = ".." }
lightning = { path = "../../lightning", features = ["regex"] }
-bech32 = "0.8"
+bech32 = "0.9.0"
# Prevent this from interfering with workspaces
[workspace]
#!/bin/bash
set -e
-cargo install --force honggfuzz
+cargo install --force honggfuzz --no-default-features
for TARGET in fuzz_targets/*; do
FILENAME=$(basename $TARGET)
FILE="${FILENAME%.*}"
+// Prefix these with `rustdoc::` when we update our MSRV to be >= 1.52 to remove warnings.
+#![deny(broken_intra_doc_links)]
+#![deny(private_intra_doc_links)]
+
#![deny(missing_docs)]
#![deny(non_upper_case_globals)]
#![deny(non_camel_case_types)]
#![deny(non_snake_case)]
#![deny(unused_mut)]
-#![deny(broken_intra_doc_links)]
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
/// 1. using `InvoiceBuilder`
/// 2. using `Invoice::from_signed(SignedRawInvoice)`
/// 3. using `str::parse::<Invoice>(&str)`
-#[derive(Eq, PartialEq, Debug, Clone)]
+#[derive(Eq, PartialEq, Debug, Clone, Hash)]
pub struct Invoice {
signed_invoice: SignedRawInvoice,
}
///
/// # Invariants
/// The hash has to be either from the deserialized invoice or from the serialized `raw_invoice`.
-#[derive(Eq, PartialEq, Debug, Clone)]
+#[derive(Eq, PartialEq, Debug, Clone, Hash)]
pub struct SignedRawInvoice {
/// The rawInvoice that the signature belongs to
raw_invoice: RawInvoice,
/// De- and encoding should not lead to information loss but may lead to different hashes.
///
/// For methods without docs see the corresponding methods in `Invoice`.
-#[derive(Eq, PartialEq, Debug, Clone)]
+#[derive(Eq, PartialEq, Debug, Clone, Hash)]
pub struct RawInvoice {
/// human readable part
pub hrp: RawHrp,
/// Data of the `RawInvoice` that is encoded in the human readable part
///
/// (C-not exported) As we don't yet support Option<Enum>
-#[derive(Eq, PartialEq, Debug, Clone)]
+#[derive(Eq, PartialEq, Debug, Clone, Hash)]
pub struct RawHrp {
/// The currency derived from the 3rd and 4th character of the bech32 string
pub currency: Currency,
}
/// Data of the `RawInvoice` that is encoded in the data part
-#[derive(Eq, PartialEq, Debug, Clone)]
+#[derive(Eq, PartialEq, Debug, Clone, Hash)]
pub struct RawDataPart {
/// generation time of the invoice
pub timestamp: PositiveTimestamp,
///
/// The Unix timestamp representing the stored time has to be positive and no greater than
/// [`MAX_TIMESTAMP`].
-#[derive(Eq, PartialEq, Debug, Clone)]
+#[derive(Eq, PartialEq, Debug, Clone, Hash)]
pub struct PositiveTimestamp(Duration);
/// SI prefixes for the human readable part
-#[derive(Eq, PartialEq, Debug, Clone, Copy)]
+#[derive(Eq, PartialEq, Debug, Clone, Copy, Hash)]
pub enum SiPrefix {
/// 10^-3
Milli,
}
/// Recoverable signature
-#[derive(Clone, Debug, Eq, PartialEq)]
+#[derive(Clone, Debug, Hash, Eq, PartialEq)]
pub struct InvoiceSignature(pub RecoverableSignature);
/// Private routing information
//! # }
//! #
//! # struct FakeRouter {}
-//! # impl<S: Score> Router<S> for FakeRouter {
-//! # fn find_route(
+//! # impl Router for FakeRouter {
+//! # fn find_route<S: Score>(
//! # &self, payer: &PublicKey, params: &RouteParameters, payment_hash: &PaymentHash,
//! # first_hops: Option<&[&ChannelDetails]>, scorer: &S
//! # ) -> Result<Route, LightningError> { unimplemented!() }
use lightning::ln::{PaymentHash, PaymentPreimage, PaymentSecret};
use lightning::ln::channelmanager::{ChannelDetails, PaymentId, PaymentSendFailure};
use lightning::ln::msgs::LightningError;
-use lightning::routing::scoring::{LockableScore, Score};
-use lightning::routing::router::{PaymentParameters, Route, RouteParameters};
+use lightning::routing::gossip::NodeId;
+use lightning::routing::scoring::{ChannelUsage, LockableScore, Score};
+use lightning::routing::router::{PaymentParameters, Route, RouteHop, RouteParameters};
+use lightning::util::errors::APIError;
use lightning::util::events::{Event, EventHandler};
use lightning::util::logger::Logger;
use time_utils::Time;
type ConfiguredTime = time_utils::Eternity;
/// (C-not exported) generally all users should use the [`InvoicePayer`] type alias.
-pub struct InvoicePayerUsingTime<P: Deref, R, S: Deref, L: Deref, E: EventHandler, T: Time>
+pub struct InvoicePayerUsingTime<P: Deref, R: Router, S: Deref, L: Deref, E: EventHandler, T: Time>
where
P::Target: Payer,
- R: for <'a> Router<<<S as Deref>::Target as LockableScore<'a>>::Locked>,
S::Target: for <'a> LockableScore<'a>,
L::Target: Logger,
{
logger: L,
event_handler: E,
/// Caches the overall attempts at making a payment, which is updated prior to retrying.
- payment_cache: Mutex<HashMap<PaymentHash, PaymentAttempts<T>>>,
+ payment_cache: Mutex<HashMap<PaymentHash, PaymentInfo<T>>>,
retry: Retry,
}
+/// Used by [`InvoicePayerUsingTime::payment_cache`] to track the payments that are either
+/// currently being made, or have outstanding paths that need retrying.
+struct PaymentInfo<T: Time> {
+ attempts: PaymentAttempts<T>,
+ paths: Vec<Vec<RouteHop>>,
+}
+
+impl<T: Time> PaymentInfo<T> {
+ fn new() -> Self {
+ PaymentInfo {
+ attempts: PaymentAttempts::new(),
+ paths: vec![],
+ }
+ }
+}
+
+/// Used to store information about all the HTLCs that are inflight across all payment attempts
+struct AccountForInFlightHtlcs<'a, S: Score> {
+ scorer: &'a mut S,
+ /// Maps a channel's short channel id and its direction to the liquidity used up.
+ inflight_htlcs: HashMap<(u64, bool), u64>,
+}
+
+#[cfg(c_bindings)]
+impl<'a, S: Score> lightning::util::ser::Writeable for AccountForInFlightHtlcs<'a, S> {
+ fn write<W: lightning::util::ser::Writer>(&self, writer: &mut W) -> Result<(), std::io::Error> { self.scorer.write(writer) }
+}
+
+impl<'a, S: Score> Score for AccountForInFlightHtlcs<'a, S> {
+ fn channel_penalty_msat(&self, short_channel_id: u64, source: &NodeId, target: &NodeId, usage: ChannelUsage) -> u64 {
+ if let Some(used_liquidity) = self.inflight_htlcs.get(&(short_channel_id, source < target)) {
+ let usage = ChannelUsage {
+ inflight_htlc_msat: usage.inflight_htlc_msat + used_liquidity,
+ ..usage
+ };
+
+ self.scorer.channel_penalty_msat(short_channel_id, source, target, usage)
+ } else {
+ self.scorer.channel_penalty_msat(short_channel_id, source, target, usage)
+ }
+ }
+
+ fn payment_path_failed(&mut self, path: &[&RouteHop], short_channel_id: u64) {
+ self.scorer.payment_path_failed(path, short_channel_id)
+ }
+
+ fn payment_path_successful(&mut self, path: &[&RouteHop]) {
+ self.scorer.payment_path_successful(path)
+ }
+
+ fn probe_failed(&mut self, path: &[&RouteHop], short_channel_id: u64) {
+ self.scorer.probe_failed(path, short_channel_id)
+ }
+
+ fn probe_successful(&mut self, path: &[&RouteHop]) {
+ self.scorer.probe_successful(path)
+ }
+}
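+// Illustrative usage (a sketch, not part of the public API): the wrapper is
+// built per routing call, overlaying the in-flight map from
+// `create_inflight_map()` on top of the inner scorer's view of each channel:
+//
+//     let inflight_htlcs = self.create_inflight_map();
+//     let scorer = AccountForInFlightHtlcs { scorer: &mut self.scorer.lock(), inflight_htlcs };
+//     // When handed to `Router::find_route`, `scorer` reports each channel's
+//     // `inflight_htlc_msat` inflated by our own pending HTLCs.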
+
/// Storing minimal payment attempts information required for determining if an outbound payment can
/// be retried.
#[derive(Clone, Copy)]
}
/// A trait defining behavior for routing an [`Invoice`] payment.
-pub trait Router<S: Score> {
+pub trait Router {
/// Finds a [`Route`] between `payer` and `payee` for a payment with the given values.
- fn find_route(
+ fn find_route<S: Score>(
&self, payer: &PublicKey, route_params: &RouteParameters, payment_hash: &PaymentHash,
first_hops: Option<&[&ChannelDetails]>, scorer: &S
) -> Result<Route, LightningError>;
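// A sketch of what the reshaped trait means for implementors (`MyRouter` is
// hypothetical): the `Score` parameter moved from the trait to the method, so a
// single `Router` impl must now accept whichever scorer it is handed, e.g. the
// `AccountForInFlightHtlcs` wrapper above:
//
//     struct MyRouter;
//     impl Router for MyRouter {
//         fn find_route<S: Score>(
//             &self, _payer: &PublicKey, _route_params: &RouteParameters,
//             _payment_hash: &PaymentHash, _first_hops: Option<&[&ChannelDetails]>,
//             _scorer: &S,
//         ) -> Result<Route, LightningError> { unimplemented!() }
//     }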
Sending(PaymentSendFailure),
}
-impl<P: Deref, R, S: Deref, L: Deref, E: EventHandler, T: Time> InvoicePayerUsingTime<P, R, S, L, E, T>
+impl<P: Deref, R: Router, S: Deref, L: Deref, E: EventHandler, T: Time> InvoicePayerUsingTime<P, R, S, L, E, T>
where
P::Target: Payer,
- R: for <'a> Router<<<S as Deref>::Target as LockableScore<'a>>::Locked>,
S::Target: for <'a> LockableScore<'a>,
L::Target: Logger,
{
let payment_hash = PaymentHash(invoice.payment_hash().clone().into_inner());
match self.payment_cache.lock().unwrap().entry(payment_hash) {
hash_map::Entry::Occupied(_) => return Err(PaymentError::Invoice("payment pending")),
- hash_map::Entry::Vacant(entry) => entry.insert(PaymentAttempts::new()),
+ hash_map::Entry::Vacant(entry) => entry.insert(PaymentInfo::new()),
};
let payment_secret = Some(invoice.payment_secret().clone());
let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0).into_inner());
match self.payment_cache.lock().unwrap().entry(payment_hash) {
hash_map::Entry::Occupied(_) => return Err(PaymentError::Invoice("payment pending")),
- hash_map::Entry::Vacant(entry) => entry.insert(PaymentAttempts::new()),
+ hash_map::Entry::Vacant(entry) => entry.insert(PaymentInfo::new()),
};
let route_params = RouteParameters {
let payer = self.payer.node_id();
let first_hops = self.payer.first_hops();
+ let inflight_htlcs = self.create_inflight_map();
let route = self.router.find_route(
- &payer, params, &payment_hash, Some(&first_hops.iter().collect::<Vec<_>>()),
- &self.scorer.lock()
+ &payer, &params, &payment_hash, Some(&first_hops.iter().collect::<Vec<_>>()),
+ &AccountForInFlightHtlcs { scorer: &mut self.scorer.lock(), inflight_htlcs }
).map_err(|e| PaymentError::Routing(e))?;
match send_payment(&route) {
- Ok(payment_id) => Ok(payment_id),
+ Ok(payment_id) => {
+ for path in route.paths {
+ self.process_path_inflight_htlcs(payment_hash, path);
+ }
+ Ok(payment_id)
+ },
Err(e) => match e {
PaymentSendFailure::ParameterError(_) => Err(e),
PaymentSendFailure::PathParameterError(_) => Err(e),
PaymentSendFailure::AllFailedRetrySafe(_) => {
let mut payment_cache = self.payment_cache.lock().unwrap();
- let payment_attempts = payment_cache.get_mut(&payment_hash).unwrap();
- payment_attempts.count += 1;
- if self.retry.is_retryable_now(payment_attempts) {
+ let payment_info = payment_cache.get_mut(&payment_hash).unwrap();
+ payment_info.attempts.count += 1;
+ if self.retry.is_retryable_now(&payment_info.attempts) {
core::mem::drop(payment_cache);
Ok(self.pay_internal(params, payment_hash, send_payment)?)
} else {
Err(e)
}
},
- PaymentSendFailure::PartialFailure { failed_paths_retry, payment_id, .. } => {
+ PaymentSendFailure::PartialFailure { failed_paths_retry, payment_id, results } => {
+ // If a `PartialFailure` error contains a result that is an `Ok()`, it means that
+ // part of our payment is retried. When we receive `MonitorUpdateFailed`, it
+ // means that we are still waiting for our channel monitor update to be completed.
+ for (result, path) in results.iter().zip(route.paths.into_iter()) {
+ match result {
+ Ok(_) | Err(APIError::MonitorUpdateFailed) => {
+ self.process_path_inflight_htlcs(payment_hash, path);
+ },
+ _ => {},
+ }
+ }
+
if let Some(retry_data) = failed_paths_retry {
// Some paths were sent, even if we failed to send the full MPP value our
// recipient may misbehave and claim the funds, at which point we have to
}.map_err(|e| PaymentError::Sending(e))
}
+ // Stores a sent path in `payment_cache` so its HTLCs are counted as in-flight
+ // until the path either succeeds or fails.
+ fn process_path_inflight_htlcs(&self, payment_hash: PaymentHash, path: Vec<RouteHop>) {
+ self.payment_cache.lock().unwrap().entry(payment_hash)
+ .or_insert_with(|| PaymentInfo::new())
+ .paths.push(path);
+ }
+
+ // Find the path we want to remove in `payment_cache`. If it doesn't exist, do nothing.
+ fn remove_path_inflight_htlcs(&self, payment_hash: PaymentHash, path: &Vec<RouteHop>) {
+ self.payment_cache.lock().unwrap().entry(payment_hash)
+ .and_modify(|payment_info| {
+ if let Some(idx) = payment_info.paths.iter().position(|p| p == path) {
+ payment_info.paths.swap_remove(idx);
+ }
+ });
+ }
+
fn retry_payment(
&self, payment_id: PaymentId, payment_hash: PaymentHash, params: &RouteParameters
) -> Result<(), ()> {
- let attempts =
- *self.payment_cache.lock().unwrap().entry(payment_hash)
- .and_modify(|attempts| attempts.count += 1)
- .or_insert(PaymentAttempts {
- count: 1,
- first_attempted_at: T::now()
- });
+ let attempts = self.payment_cache.lock().unwrap().entry(payment_hash)
+ .and_modify(|info| info.attempts.count += 1 )
+ .or_insert_with(|| PaymentInfo {
+ attempts: PaymentAttempts {
+ count: 1,
+ first_attempted_at: T::now(),
+ },
+ paths: vec![],
+ }).attempts;
if !self.retry.is_retryable_now(&attempts) {
log_trace!(self.logger, "Payment {} exceeded maximum attempts; not retrying ({})", log_bytes!(payment_hash.0), attempts);
let payer = self.payer.node_id();
let first_hops = self.payer.first_hops();
+ let inflight_htlcs = self.create_inflight_map();
+
let route = self.router.find_route(
&payer, &params, &payment_hash, Some(&first_hops.iter().collect::<Vec<_>>()),
- &self.scorer.lock()
+ &AccountForInFlightHtlcs { scorer: &mut self.scorer.lock(), inflight_htlcs }
);
+
if route.is_err() {
log_trace!(self.logger, "Failed to find a route for payment {}; not retrying ({:})", log_bytes!(payment_hash.0), attempts);
return Err(());
}
- match self.payer.retry_payment(&route.unwrap(), payment_id) {
- Ok(()) => Ok(()),
+ match self.payer.retry_payment(&route.as_ref().unwrap(), payment_id) {
+ Ok(()) => {
+ for path in route.unwrap().paths.into_iter() {
+ self.process_path_inflight_htlcs(payment_hash, path);
+ }
+ Ok(())
+ },
Err(PaymentSendFailure::ParameterError(_)) |
Err(PaymentSendFailure::PathParameterError(_)) => {
log_trace!(self.logger, "Failed to retry for payment {} due to bogus route/payment data, not retrying.", log_bytes!(payment_hash.0));
Err(PaymentSendFailure::AllFailedRetrySafe(_)) => {
self.retry_payment(payment_id, payment_hash, params)
},
- Err(PaymentSendFailure::PartialFailure { failed_paths_retry, .. }) => {
+ Err(PaymentSendFailure::PartialFailure { failed_paths_retry, results, .. }) => {
+ // If a `PartialFailure` error contains a result that is an `Ok()`, it means that
+ // part of our payment is retried. When we receive `MonitorUpdateFailed`, it
+ // means that we are still waiting for our channel monitor update to complete.
+ for (result, path) in results.iter().zip(route.unwrap().paths.into_iter()) {
+ match result {
+ Ok(_) | Err(APIError::MonitorUpdateFailed) => {
+ self.process_path_inflight_htlcs(payment_hash, path);
+ },
+ _ => {},
+ }
+ }
+
if let Some(retry) = failed_paths_retry {
// Always return Ok for the same reason as noted in pay_internal.
let _ = self.retry_payment(payment_id, payment_hash, &retry);
pub fn remove_cached_payment(&self, payment_hash: &PaymentHash) {
self.payment_cache.lock().unwrap().remove(payment_hash);
}
+
+ /// Uses the path information stored in `payment_cache` to construct a HashMap
+ /// mapping a channel's short channel id and direction to the total amount being
+ /// sent through it.
+ ///
+ /// This function should be called whenever we need information about currently used up liquidity
+ /// across payments.
+ fn create_inflight_map(&self) -> HashMap<(u64, bool), u64> {
+ let mut total_inflight_map: HashMap<(u64, bool), u64> = HashMap::new();
+ // Walk every payment in `payment_cache` and sum up the in-flight amount per
+ // (short channel id, direction) pair; if nothing is cached, the returned
+ // HashMap is simply empty.
+ for payment_info in self.payment_cache.lock().unwrap().values() {
+ for path in &payment_info.paths {
+ if path.is_empty() { break };
+ // total_inflight_map needs to be direction-sensitive when keeping track of the HTLC value
+ // that is held up. However, the `hops` array, which is a path returned by `find_route` in
+ // the router excludes the payer node. In the following lines, the payer's information is
+ // hardcoded with an inflight value of 0 so that we can correctly represent the first hop
+ // in our sliding window of two.
+ let our_node_id: PublicKey = self.payer.node_id();
+ let reversed_hops_with_payer = path.iter().rev().skip(1)
+ .map(|hop| hop.pubkey)
+ .chain(core::iter::once(our_node_id));
+ let mut cumulative_msat = 0;
+
+ // Zip the reversed hops with the payer-extended list above to walk the path
+ // "backwards", since the last hop's `fee_msat` actually represents the total
+ // amount sent.
+ for (next_hop, prev_hop) in path.iter().rev().zip(reversed_hops_with_payer) {
+ cumulative_msat += next_hop.fee_msat;
+ total_inflight_map
+ .entry((next_hop.short_channel_id, NodeId::from_pubkey(&prev_hop) < NodeId::from_pubkey(&next_hop.pubkey)))
+ .and_modify(|used_liquidity_msat| *used_liquidity_msat += cumulative_msat)
+ .or_insert(cumulative_msat);
+ }
+ }
+ }
+
+ total_inflight_map
+ }
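+ // Worked example (hypothetical values): for a path payer -> A -> B with hops
+ // [ { scid: 1, pubkey: A, fee_msat: 10 }, { scid: 2, pubkey: B, fee_msat: 100 } ]
+ // (the last hop's `fee_msat` being the amount delivered), the reverse walk gives:
+ //     (next_hop: B, prev_hop: A)     => cumulative = 100; map[(2, A < B)]     += 100
+ //     (next_hop: A, prev_hop: payer) => cumulative = 110; map[(1, payer < A)] += 110
+ // i.e. each channel is charged the delivered amount plus all downstream fees,
+ // the same scheme asserted in `generates_correct_inflight_map_data` below.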
}
fn expiry_time_from_unix_epoch(invoice: &Invoice) -> Duration {
} else { false }
}
-impl<P: Deref, R, S: Deref, L: Deref, E: EventHandler, T: Time> EventHandler for InvoicePayerUsingTime<P, R, S, L, E, T>
+impl<P: Deref, R: Router, S: Deref, L: Deref, E: EventHandler, T: Time> EventHandler for InvoicePayerUsingTime<P, R, S, L, E, T>
where
P::Target: Payer,
- R: for <'a> Router<<<S as Deref>::Target as LockableScore<'a>>::Locked>,
S::Target: for <'a> LockableScore<'a>,
L::Target: Logger,
{
fn handle_event(&self, event: &Event) {
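+ // Before the usual event handling below, prune any now-resolved path from the
+ // in-flight set: a path-level success, failure, or probe result means those
+ // HTLCs are no longer outstanding.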
+ match event {
+ Event::PaymentPathFailed { payment_hash, path, .. }
+ | Event::PaymentPathSuccessful { path, payment_hash: Some(payment_hash), .. }
+ | Event::ProbeSuccessful { payment_hash, path, .. }
+ | Event::ProbeFailed { payment_hash, path, .. } => {
+ self.remove_path_inflight_htlcs(*payment_hash, path);
+ },
+ _ => {},
+ }
+
match event {
Event::PaymentPathFailed {
payment_id, payment_hash, rejected_by_dest, path, short_channel_id, retry, ..
let mut payment_cache = self.payment_cache.lock().unwrap();
let attempts = payment_cache
.remove(payment_hash)
- .map_or(1, |attempts| attempts.count + 1);
+ .map_or(1, |payment_info| payment_info.attempts.count + 1);
log_trace!(self.logger, "Payment {} succeeded (attempts: {})", log_bytes!(payment_hash.0), attempts);
},
Event::ProbeSuccessful { payment_hash, path, .. } => {
use lightning::ln::features::{ChannelFeatures, NodeFeatures, InitFeatures};
use lightning::ln::functional_test_utils::*;
use lightning::ln::msgs::{ChannelMessageHandler, ErrorAction, LightningError};
- use lightning::routing::gossip::NodeId;
+ use lightning::routing::gossip::{EffectiveCapacity, NodeId};
use lightning::routing::router::{PaymentParameters, Route, RouteHop};
use lightning::routing::scoring::ChannelUsage;
use lightning::util::test_utils::TestLogger;
use std::time::{SystemTime, Duration};
use time_utils::tests::SinceEpoch;
use DEFAULT_EXPIRY_TIME;
+ use lightning::util::errors::APIError::{ChannelUnavailable, MonitorUpdateFailed};
fn invoice(payment_preimage: PaymentPreimage) -> Invoice {
let payment_hash = Sha256::hash(&payment_preimage.0);
let final_value_msat = invoice.amount_milli_satoshis().unwrap();
let payer = TestPayer::new()
- .fails_with_partial_failure(retry.clone(), OnAttempt(1))
- .fails_with_partial_failure(retry, OnAttempt(2))
+ .fails_with_partial_failure(retry.clone(), OnAttempt(1), None)
+ .fails_with_partial_failure(retry, OnAttempt(2), None)
.expect_send(Amount::ForInvoice(final_value_msat))
.expect_send(Amount::OnRetry(final_value_msat / 2))
.expect_send(Amount::OnRetry(final_value_msat / 2));
invoice_payer.handle_event(&event);
}
+ #[test]
+ fn generates_correct_inflight_map_data() {
+ let event_handled = core::cell::RefCell::new(false);
+ let event_handler = |_: &_| { *event_handled.borrow_mut() = true; };
+
+ let payment_preimage = PaymentPreimage([1; 32]);
+ let invoice = invoice(payment_preimage);
+ let payment_hash = Some(PaymentHash(invoice.payment_hash().clone().into_inner()));
+ let final_value_msat = invoice.amount_milli_satoshis().unwrap();
+
+ let payer = TestPayer::new().expect_send(Amount::ForInvoice(final_value_msat));
+ let route = TestRouter::route_for_value(final_value_msat);
+ let router = TestRouter {};
+ let scorer = RefCell::new(TestScorer::new());
+ let logger = TestLogger::new();
+ let invoice_payer =
+ InvoicePayer::new(&payer, router, &scorer, &logger, event_handler, Retry::Attempts(0));
+
+ let payment_id = invoice_payer.pay_invoice(&invoice).unwrap();
+
+ let inflight_map = invoice_payer.create_inflight_map();
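+		// Each path carries final_value_msat / 2 (64 msat for the 128 msat test invoice);
+		// adding the 20- and 10-msat hop fees from `route_for_value` gives the totals below.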
+ // First path check
+ assert_eq!(inflight_map.get(&(0, false)).unwrap().clone(), 94);
+ assert_eq!(inflight_map.get(&(1, true)).unwrap().clone(), 84);
+ assert_eq!(inflight_map.get(&(2, false)).unwrap().clone(), 64);
+
+ // Second path check
+ assert_eq!(inflight_map.get(&(3, false)).unwrap().clone(), 74);
+ assert_eq!(inflight_map.get(&(4, false)).unwrap().clone(), 64);
+
+ invoice_payer.handle_event(&Event::PaymentPathSuccessful {
+ payment_id, payment_hash, path: route.paths[0].clone()
+ });
+
+ let inflight_map = invoice_payer.create_inflight_map();
+
+ assert_eq!(inflight_map.get(&(0, false)), None);
+ assert_eq!(inflight_map.get(&(1, true)), None);
+ assert_eq!(inflight_map.get(&(2, false)), None);
+
+ // Second path should still be inflight
+ assert_eq!(inflight_map.get(&(3, false)).unwrap().clone(), 74);
+		assert_eq!(inflight_map.get(&(4, false)).unwrap().clone(), 64);
+ }
+
+ #[test]
+ fn considers_inflight_htlcs_between_invoice_payments_when_path_succeeds() {
+		// First, let's send a payment through, but only let one of the paths complete
+ let event_handled = core::cell::RefCell::new(false);
+ let event_handler = |_: &_| { *event_handled.borrow_mut() = true; };
+
+ let payment_preimage = PaymentPreimage([1; 32]);
+ let payment_invoice = invoice(payment_preimage);
+ let payment_hash = Some(PaymentHash(payment_invoice.payment_hash().clone().into_inner()));
+ let final_value_msat = payment_invoice.amount_milli_satoshis().unwrap();
+
+ let payer = TestPayer::new()
+ .expect_send(Amount::ForInvoice(final_value_msat))
+ .expect_send(Amount::ForInvoice(final_value_msat));
+ let route = TestRouter::route_for_value(final_value_msat);
+ let router = TestRouter {};
+ let scorer = RefCell::new(TestScorer::new()
+ // 1st invoice, 1st path
+ .expect_usage(ChannelUsage { amount_msat: 64, inflight_htlc_msat: 0, effective_capacity: EffectiveCapacity::Unknown } )
+ .expect_usage(ChannelUsage { amount_msat: 84, inflight_htlc_msat: 0, effective_capacity: EffectiveCapacity::Unknown } )
+ .expect_usage(ChannelUsage { amount_msat: 94, inflight_htlc_msat: 0, effective_capacity: EffectiveCapacity::Unknown } )
+ // 1st invoice, 2nd path
+ .expect_usage(ChannelUsage { amount_msat: 64, inflight_htlc_msat: 0, effective_capacity: EffectiveCapacity::Unknown } )
+ .expect_usage(ChannelUsage { amount_msat: 74, inflight_htlc_msat: 0, effective_capacity: EffectiveCapacity::Unknown } )
+ // 2nd invoice, 1st path
+ .expect_usage(ChannelUsage { amount_msat: 64, inflight_htlc_msat: 0, effective_capacity: EffectiveCapacity::Unknown } )
+ .expect_usage(ChannelUsage { amount_msat: 84, inflight_htlc_msat: 0, effective_capacity: EffectiveCapacity::Unknown } )
+ .expect_usage(ChannelUsage { amount_msat: 94, inflight_htlc_msat: 0, effective_capacity: EffectiveCapacity::Unknown } )
+ // 2nd invoice, 2nd path
+ .expect_usage(ChannelUsage { amount_msat: 64, inflight_htlc_msat: 64, effective_capacity: EffectiveCapacity::Unknown } )
+ .expect_usage(ChannelUsage { amount_msat: 74, inflight_htlc_msat: 74, effective_capacity: EffectiveCapacity::Unknown } )
+ );
+ let logger = TestLogger::new();
+ let invoice_payer =
+ InvoicePayer::new(&payer, router, &scorer, &logger, event_handler, Retry::Attempts(0));
+
+ // Succeed 1st path, leave 2nd path inflight
+ let payment_id = invoice_payer.pay_invoice(&payment_invoice).unwrap();
+ invoice_payer.handle_event(&Event::PaymentPathSuccessful {
+ payment_id, payment_hash, path: route.paths[0].clone()
+ });
+
+		// Now let's pay a second invoice that will use the same paths. This should trigger the
+		// assertions on the last 4 ChannelUsage values above, where TestScorer is initialized.
+		// In particular, since the 2nd path of the 1st payment is not yet complete, it should
+		// still have 64 msat inflight on the channel with scid 4 (and 74 msat on scid 3).
+ let payment_preimage_2 = PaymentPreimage([2; 32]);
+ let payment_invoice_2 = invoice(payment_preimage_2);
+ invoice_payer.pay_invoice(&payment_invoice_2).unwrap();
+ }
+
+ #[test]
+ fn considers_inflight_htlcs_between_retries() {
+		// First, let's send a payment through, but fail one of the paths, leaving the other
+		// inflight across our retries.
+ let event_handled = core::cell::RefCell::new(false);
+ let event_handler = |_: &_| { *event_handled.borrow_mut() = true; };
+
+ let payment_preimage = PaymentPreimage([1; 32]);
+ let payment_invoice = invoice(payment_preimage);
+ let payment_hash = PaymentHash(payment_invoice.payment_hash().clone().into_inner());
+ let final_value_msat = payment_invoice.amount_milli_satoshis().unwrap();
+
+ let payer = TestPayer::new()
+ .expect_send(Amount::ForInvoice(final_value_msat))
+ .expect_send(Amount::OnRetry(final_value_msat / 2))
+ .expect_send(Amount::OnRetry(final_value_msat / 4));
+ let router = TestRouter {};
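+		// Each retry halves the per-path amount (64 -> 32 -> 16 msat), while the still-pending
+		// 2nd path from each earlier attempt shows up in `inflight_htlc_msat` below.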
+ let scorer = RefCell::new(TestScorer::new()
+ // 1st invoice, 1st path
+ .expect_usage(ChannelUsage { amount_msat: 64, inflight_htlc_msat: 0, effective_capacity: EffectiveCapacity::Unknown } )
+ .expect_usage(ChannelUsage { amount_msat: 84, inflight_htlc_msat: 0, effective_capacity: EffectiveCapacity::Unknown } )
+ .expect_usage(ChannelUsage { amount_msat: 94, inflight_htlc_msat: 0, effective_capacity: EffectiveCapacity::Unknown } )
+ // 1st invoice, 2nd path
+ .expect_usage(ChannelUsage { amount_msat: 64, inflight_htlc_msat: 0, effective_capacity: EffectiveCapacity::Unknown } )
+ .expect_usage(ChannelUsage { amount_msat: 74, inflight_htlc_msat: 0, effective_capacity: EffectiveCapacity::Unknown } )
+ // Retry 1, 1st path
+ .expect_usage(ChannelUsage { amount_msat: 32, inflight_htlc_msat: 0, effective_capacity: EffectiveCapacity::Unknown } )
+ .expect_usage(ChannelUsage { amount_msat: 52, inflight_htlc_msat: 0, effective_capacity: EffectiveCapacity::Unknown } )
+ .expect_usage(ChannelUsage { amount_msat: 62, inflight_htlc_msat: 0, effective_capacity: EffectiveCapacity::Unknown } )
+ // Retry 1, 2nd path
+ .expect_usage(ChannelUsage { amount_msat: 32, inflight_htlc_msat: 64, effective_capacity: EffectiveCapacity::Unknown } )
+ .expect_usage(ChannelUsage { amount_msat: 42, inflight_htlc_msat: 64 + 10, effective_capacity: EffectiveCapacity::Unknown } )
+ // Retry 2, 1st path
+ .expect_usage(ChannelUsage { amount_msat: 16, inflight_htlc_msat: 0, effective_capacity: EffectiveCapacity::Unknown } )
+ .expect_usage(ChannelUsage { amount_msat: 36, inflight_htlc_msat: 0, effective_capacity: EffectiveCapacity::Unknown } )
+ .expect_usage(ChannelUsage { amount_msat: 46, inflight_htlc_msat: 0, effective_capacity: EffectiveCapacity::Unknown } )
+ // Retry 2, 2nd path
+ .expect_usage(ChannelUsage { amount_msat: 16, inflight_htlc_msat: 64 + 32, effective_capacity: EffectiveCapacity::Unknown } )
+ .expect_usage(ChannelUsage { amount_msat: 26, inflight_htlc_msat: 74 + 32 + 10, effective_capacity: EffectiveCapacity::Unknown } )
+ );
+ let logger = TestLogger::new();
+ let invoice_payer =
+ InvoicePayer::new(&payer, router, &scorer, &logger, event_handler, Retry::Attempts(2));
+
+ // Fail 1st path, leave 2nd path inflight
+ let payment_id = Some(invoice_payer.pay_invoice(&payment_invoice).unwrap());
+ invoice_payer.handle_event(&Event::PaymentPathFailed {
+ payment_id,
+ payment_hash,
+ network_update: None,
+ rejected_by_dest: false,
+ all_paths_failed: false,
+ path: TestRouter::path_for_value(final_value_msat),
+ short_channel_id: None,
+ retry: Some(TestRouter::retry_for_invoice(&payment_invoice)),
+ });
+
+		// Fail the 1st path of our retry as well
+ invoice_payer.handle_event(&Event::PaymentPathFailed {
+ payment_id,
+ payment_hash,
+ network_update: None,
+ rejected_by_dest: false,
+ all_paths_failed: false,
+ path: TestRouter::path_for_value(final_value_msat / 2),
+ short_channel_id: None,
+ retry: Some(RouteParameters {
+ final_value_msat: final_value_msat / 4,
+ ..TestRouter::retry_for_invoice(&payment_invoice)
+ }),
+ });
+ }
+
+ #[test]
+ fn accounts_for_some_inflight_htlcs_sent_during_partial_failure() {
+ let event_handled = core::cell::RefCell::new(false);
+ let event_handler = |_: &_| { *event_handled.borrow_mut() = true; };
+
+ let payment_preimage = PaymentPreimage([1; 32]);
+ let invoice_to_pay = invoice(payment_preimage);
+ let final_value_msat = invoice_to_pay.amount_milli_satoshis().unwrap();
+
+ let retry = TestRouter::retry_for_invoice(&invoice_to_pay);
+ let payer = TestPayer::new()
+ .fails_with_partial_failure(
+ retry.clone(), OnAttempt(1),
+ Some(vec![
+ Err(ChannelUnavailable { err: "abc".to_string() }), Err(MonitorUpdateFailed)
+ ]))
+ .expect_send(Amount::ForInvoice(final_value_msat));
+
+ let router = TestRouter {};
+ let scorer = RefCell::new(TestScorer::new());
+ let logger = TestLogger::new();
+ let invoice_payer =
+ InvoicePayer::new(&payer, router, &scorer, &logger, event_handler, Retry::Attempts(0));
+
+ invoice_payer.pay_invoice(&invoice_to_pay).unwrap();
+ let inflight_map = invoice_payer.create_inflight_map();
+
+		// With retries disabled, only the second path should be in our inflight map: it failed
+		// with `MonitorUpdateFailed`, meaning its HTLCs were actually sent, while the first
+		// path's `ChannelUnavailable` failure means nothing was sent on it. The second path's
+		// two hops account for the two entries below.
+ assert_eq!(inflight_map.len(), 2);
+ }
+
+ #[test]
+ fn accounts_for_all_inflight_htlcs_sent_during_partial_failure() {
+ let event_handled = core::cell::RefCell::new(false);
+ let event_handler = |_: &_| { *event_handled.borrow_mut() = true; };
+
+ let payment_preimage = PaymentPreimage([1; 32]);
+ let invoice_to_pay = invoice(payment_preimage);
+ let final_value_msat = invoice_to_pay.amount_milli_satoshis().unwrap();
+
+ let retry = TestRouter::retry_for_invoice(&invoice_to_pay);
+ let payer = TestPayer::new()
+ .fails_with_partial_failure(
+ retry.clone(), OnAttempt(1),
+ Some(vec![
+ Ok(()), Err(MonitorUpdateFailed)
+ ]))
+ .expect_send(Amount::ForInvoice(final_value_msat));
+
+ let router = TestRouter {};
+ let scorer = RefCell::new(TestScorer::new());
+ let logger = TestLogger::new();
+ let invoice_payer =
+ InvoicePayer::new(&payer, router, &scorer, &logger, event_handler, Retry::Attempts(0));
+
+ invoice_payer.pay_invoice(&invoice_to_pay).unwrap();
+ let inflight_map = invoice_payer.create_inflight_map();
+
+		// Both paths' HTLCs were sent (`Ok` and `MonitorUpdateFailed` both leave HTLCs
+		// inflight), hence we check for the existence of all 5 hops (3 on the first path, 2 on
+		// the second).
+ assert_eq!(inflight_map.len(), 5);
+ }
+
struct TestRouter;
impl TestRouter {
fn route_for_value(final_value_msat: u64) -> Route {
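+			// Two paths: scids 0 -> 1 -> 2 (fees 10, 20, final_value_msat / 2) and
+			// scids 3 -> 4 (fees 10, final_value_msat / 2); the inflight tests above
+			// depend on these exact values.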
Route {
paths: vec![
- vec![RouteHop {
- pubkey: PublicKey::from_slice(&hex::decode("02eec7245d6b7d2ccb30380bfbe2a3648cd7a942653f5aa340edcea1f283686619").unwrap()[..]).unwrap(),
- channel_features: ChannelFeatures::empty(),
- node_features: NodeFeatures::empty(),
- short_channel_id: 0, fee_msat: final_value_msat / 2, cltv_expiry_delta: 144
- }],
- vec![RouteHop {
- pubkey: PublicKey::from_slice(&hex::decode("0324653eac434488002cc06bbfb7f10fe18991e35f9fe4302dbea6d2353dc0ab1c").unwrap()[..]).unwrap(),
- channel_features: ChannelFeatures::empty(),
- node_features: NodeFeatures::empty(),
- short_channel_id: 1, fee_msat: final_value_msat / 2, cltv_expiry_delta: 144
- }],
+ vec![
+ RouteHop {
+ pubkey: PublicKey::from_slice(&hex::decode("02eec7245d6b7d2ccb30380bfbe2a3648cd7a942653f5aa340edcea1f283686619").unwrap()[..]).unwrap(),
+ channel_features: ChannelFeatures::empty(),
+ node_features: NodeFeatures::empty(),
+ short_channel_id: 0,
+ fee_msat: 10,
+ cltv_expiry_delta: 0
+ },
+ RouteHop {
+ pubkey: PublicKey::from_slice(&hex::decode("0324653eac434488002cc06bbfb7f10fe18991e35f9fe4302dbea6d2353dc0ab1c").unwrap()[..]).unwrap(),
+ channel_features: ChannelFeatures::empty(),
+ node_features: NodeFeatures::empty(),
+ short_channel_id: 1,
+ fee_msat: 20,
+ cltv_expiry_delta: 0
+ },
+ RouteHop {
+ pubkey: PublicKey::from_slice(&hex::decode("027f31ebc5462c1fdce1b737ecff52d37d75dea43ce11c74d25aa297165faa2007").unwrap()[..]).unwrap(),
+ channel_features: ChannelFeatures::empty(),
+ node_features: NodeFeatures::empty(),
+ short_channel_id: 2,
+ fee_msat: final_value_msat / 2,
+ cltv_expiry_delta: 0
+ },
+ ],
+ vec![
+ RouteHop {
+ pubkey: PublicKey::from_slice(&hex::decode("029e03a901b85534ff1e92c43c74431f7ce72046060fcf7a95c37e148f78c77255").unwrap()[..]).unwrap(),
+ channel_features: ChannelFeatures::empty(),
+ node_features: NodeFeatures::empty(),
+ short_channel_id: 3,
+ fee_msat: 10,
+ cltv_expiry_delta: 144
+ },
+ RouteHop {
+ pubkey: PublicKey::from_slice(&hex::decode("027f31ebc5462c1fdce1b737ecff52d37d75dea43ce11c74d25aa297165faa2007").unwrap()[..]).unwrap(),
+ channel_features: ChannelFeatures::empty(),
+ node_features: NodeFeatures::empty(),
+ short_channel_id: 4,
+ fee_msat: final_value_msat / 2,
+ cltv_expiry_delta: 144
+ }
+ ],
],
payment_params: None,
}
}
}
- impl<S: Score> Router<S> for TestRouter {
- fn find_route(
- &self, _payer: &PublicKey, route_params: &RouteParameters, _payment_hash: &PaymentHash,
- _first_hops: Option<&[&ChannelDetails]>, _scorer: &S
+ impl Router for TestRouter {
+ fn find_route<S: Score>(
+ &self, payer: &PublicKey, route_params: &RouteParameters, _payment_hash: &PaymentHash,
+ _first_hops: Option<&[&ChannelDetails]>, scorer: &S
) -> Result<Route, LightningError> {
+			// Simulate calling the Scorer for each hop, just as a real find_route implementation would
+ let route = Self::route_for_value(route_params.final_value_msat);
+ for path in route.paths {
+ let mut aggregate_msat = 0u64;
+ for (idx, hop) in path.iter().rev().enumerate() {
+ aggregate_msat += hop.fee_msat;
+ let usage = ChannelUsage {
+ amount_msat: aggregate_msat,
+ inflight_htlc_msat: 0,
+ effective_capacity: EffectiveCapacity::Unknown,
+ };
+
+ // Since the path is reversed, the last element in our iteration is the first
+ // hop.
+ if idx == path.len() - 1 {
+ scorer.channel_penalty_msat(hop.short_channel_id, &NodeId::from_pubkey(payer), &NodeId::from_pubkey(&hop.pubkey), usage);
+ } else {
+ scorer.channel_penalty_msat(hop.short_channel_id, &NodeId::from_pubkey(&path[idx + 1].pubkey), &NodeId::from_pubkey(&hop.pubkey), usage);
+ }
+ }
+ }
+
Ok(Route {
payment_params: Some(route_params.payment_params.clone()), ..Self::route_for_value(route_params.final_value_msat)
})
struct FailingRouter;
- impl<S: Score> Router<S> for FailingRouter {
- fn find_route(
+ impl Router for FailingRouter {
+ fn find_route<S: Score>(
&self, _payer: &PublicKey, _params: &RouteParameters, _payment_hash: &PaymentHash,
_first_hops: Option<&[&ChannelDetails]>, _scorer: &S
) -> Result<Route, LightningError> {
}
struct TestScorer {
- expectations: Option<VecDeque<TestResult>>,
+ event_expectations: Option<VecDeque<TestResult>>,
+ scorer_expectations: RefCell<Option<VecDeque<ChannelUsage>>>,
}
#[derive(Debug)]
impl TestScorer {
fn new() -> Self {
Self {
- expectations: None,
+ event_expectations: None,
+ scorer_expectations: RefCell::new(None),
}
}
fn expect(mut self, expectation: TestResult) -> Self {
- self.expectations.get_or_insert_with(|| VecDeque::new()).push_back(expectation);
+ self.event_expectations.get_or_insert_with(|| VecDeque::new()).push_back(expectation);
+ self
+ }
+
+ fn expect_usage(self, expectation: ChannelUsage) -> Self {
+ self.scorer_expectations.borrow_mut().get_or_insert_with(|| VecDeque::new()).push_back(expectation);
self
}
}
impl Score for TestScorer {
fn channel_penalty_msat(
- &self, _short_channel_id: u64, _source: &NodeId, _target: &NodeId, _usage: ChannelUsage
- ) -> u64 { 0 }
+ &self, _short_channel_id: u64, _source: &NodeId, _target: &NodeId, usage: ChannelUsage
+ ) -> u64 {
+ if let Some(scorer_expectations) = self.scorer_expectations.borrow_mut().as_mut() {
+ match scorer_expectations.pop_front() {
+ Some(expectation) => {
+ assert_eq!(expectation.amount_msat, usage.amount_msat);
+ assert_eq!(expectation.inflight_htlc_msat, usage.inflight_htlc_msat);
+ },
+ None => {},
+ }
+ }
+ 0
+ }
fn payment_path_failed(&mut self, actual_path: &[&RouteHop], actual_short_channel_id: u64) {
- if let Some(expectations) = &mut self.expectations {
+ if let Some(expectations) = &mut self.event_expectations {
match expectations.pop_front() {
Some(TestResult::PaymentFailure { path, short_channel_id }) => {
assert_eq!(actual_path, &path.iter().collect::<Vec<_>>()[..]);
}
fn payment_path_successful(&mut self, actual_path: &[&RouteHop]) {
- if let Some(expectations) = &mut self.expectations {
+ if let Some(expectations) = &mut self.event_expectations {
match expectations.pop_front() {
Some(TestResult::PaymentFailure { path, .. }) => {
panic!("Unexpected payment path failure: {:?}", path)
}
fn probe_failed(&mut self, actual_path: &[&RouteHop], _: u64) {
- if let Some(expectations) = &mut self.expectations {
+ if let Some(expectations) = &mut self.event_expectations {
match expectations.pop_front() {
Some(TestResult::PaymentFailure { path, .. }) => {
panic!("Unexpected failed payment path: {:?}", path)
}
}
fn probe_successful(&mut self, actual_path: &[&RouteHop]) {
- if let Some(expectations) = &mut self.expectations {
+ if let Some(expectations) = &mut self.event_expectations {
match expectations.pop_front() {
Some(TestResult::PaymentFailure { path, .. }) => {
panic!("Unexpected payment path failure: {:?}", path)
return;
}
- if let Some(expectations) = &self.expectations {
- if !expectations.is_empty() {
- panic!("Unsatisfied scorer expectations: {:?}", expectations);
+ if let Some(event_expectations) = &self.event_expectations {
+ if !event_expectations.is_empty() {
+ panic!("Unsatisfied event expectations: {:?}", event_expectations);
+ }
+ }
+
+ if let Some(scorer_expectations) = self.scorer_expectations.borrow().as_ref() {
+ if !scorer_expectations.is_empty() {
+ panic!("Unsatisfied scorer expectations: {:?}", scorer_expectations)
}
}
}
self.fails_with(failure, OnAttempt(attempt))
}
- fn fails_with_partial_failure(self, retry: RouteParameters, attempt: OnAttempt) -> Self {
+ fn fails_with_partial_failure(self, retry: RouteParameters, attempt: OnAttempt, results: Option<Vec<Result<(), APIError>>>) -> Self {
self.fails_with(PaymentSendFailure::PartialFailure {
- results: vec![],
+ results: results.unwrap_or(vec![]),
failed_paths_retry: Some(retry),
payment_id: PaymentId([1; 32]),
}, attempt)
// *** Full Featured Functional Tests with a Real ChannelManager ***
struct ManualRouter(RefCell<VecDeque<Result<Route, LightningError>>>);
- impl<S: Score> Router<S> for ManualRouter {
- fn find_route(
+ impl Router for ManualRouter {
+ fn find_route<S: Score>(
&self, _payer: &PublicKey, _params: &RouteParameters, _payment_hash: &PaymentHash,
_first_hops: Option<&[&ChannelDetails]>, _scorer: &S
) -> Result<Route, LightningError> {
}
}
-impl<G: Deref<Target = NetworkGraph<L>>, L: Deref, S: Score> Router<S> for DefaultRouter<G, L>
+impl<G: Deref<Target = NetworkGraph<L>>, L: Deref> Router for DefaultRouter<G, L>
where L::Target: Logger {
- fn find_route(
+ fn find_route<S: Score>(
&self, payer: &PublicKey, params: &RouteParameters, _payment_hash: &PaymentHash,
first_hops: Option<&[&ChannelDetails]>, scorer: &S
) -> Result<Route, LightningError> {
rustdoc-args = ["--cfg", "docsrs"]
[dependencies]
-bitcoin = "0.28.1"
+bitcoin = "0.29.0"
lightning = { version = "0.0.110", path = "../lightning" }
tokio = { version = "1.0", features = [ "io-util", "macros", "rt", "sync", "net", "time" ] }
//! }
//! ```
+// Prefix these with `rustdoc::` when we update our MSRV to be >= 1.52 to remove warnings.
#![deny(broken_intra_doc_links)]
-#![deny(missing_docs)]
+#![deny(private_intra_doc_links)]
+#![deny(missing_docs)]
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
use bitcoin::secp256k1::PublicKey;
use lightning::ln::peer_handler;
use lightning::ln::peer_handler::SocketDescriptor as LnSocketTrait;
use lightning::ln::peer_handler::CustomMessageHandler;
-use lightning::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, NetAddress};
+use lightning::ln::msgs::{ChannelMessageHandler, NetAddress, OnionMessageHandler, RoutingMessageHandler};
use lightning::util::logger::Logger;
use std::ops::Deref;
id: u64,
}
impl Connection {
- async fn poll_event_process<CMH, RMH, L, UMH>(peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor, CMH, RMH, L, UMH>>, mut event_receiver: mpsc::Receiver<()>) where
+ async fn poll_event_process<CMH, RMH, OMH, L, UMH>(peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor, CMH, RMH, OMH, L, UMH>>, mut event_receiver: mpsc::Receiver<()>) where
CMH: Deref + 'static + Send + Sync,
RMH: Deref + 'static + Send + Sync,
+ OMH: Deref + 'static + Send + Sync,
L: Deref + 'static + Send + Sync,
UMH: Deref + 'static + Send + Sync,
CMH::Target: ChannelMessageHandler + Send + Sync,
RMH::Target: RoutingMessageHandler + Send + Sync,
+ OMH::Target: OnionMessageHandler + Send + Sync,
L::Target: Logger + Send + Sync,
UMH::Target: CustomMessageHandler + Send + Sync,
{
}
}
- async fn schedule_read<CMH, RMH, L, UMH>(peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor, CMH, RMH, L, UMH>>, us: Arc<Mutex<Self>>, mut reader: io::ReadHalf<TcpStream>, mut read_wake_receiver: mpsc::Receiver<()>, mut write_avail_receiver: mpsc::Receiver<()>) where
+ async fn schedule_read<CMH, RMH, OMH, L, UMH>(peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor, CMH, RMH, OMH, L, UMH>>, us: Arc<Mutex<Self>>, mut reader: io::ReadHalf<TcpStream>, mut read_wake_receiver: mpsc::Receiver<()>, mut write_avail_receiver: mpsc::Receiver<()>) where
CMH: Deref + 'static + Send + Sync,
RMH: Deref + 'static + Send + Sync,
+ OMH: Deref + 'static + Send + Sync,
L: Deref + 'static + Send + Sync,
UMH: Deref + 'static + Send + Sync,
CMH::Target: ChannelMessageHandler + 'static + Send + Sync,
RMH::Target: RoutingMessageHandler + 'static + Send + Sync,
+ OMH::Target: OnionMessageHandler + 'static + Send + Sync,
L::Target: Logger + 'static + Send + Sync,
UMH::Target: CustomMessageHandler + 'static + Send + Sync,
{
/// The returned future will complete when the peer is disconnected and associated handling
/// futures are freed. However, because all processing futures are spawned with tokio::spawn, you
/// do not need to poll the provided future in order to make progress.
-pub fn setup_inbound<CMH, RMH, L, UMH>(peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor, CMH, RMH, L, UMH>>, stream: StdTcpStream) -> impl std::future::Future<Output=()> where
+pub fn setup_inbound<CMH, RMH, OMH, L, UMH>(peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor, CMH, RMH, OMH, L, UMH>>, stream: StdTcpStream) -> impl std::future::Future<Output=()> where
CMH: Deref + 'static + Send + Sync,
RMH: Deref + 'static + Send + Sync,
+ OMH: Deref + 'static + Send + Sync,
L: Deref + 'static + Send + Sync,
UMH: Deref + 'static + Send + Sync,
CMH::Target: ChannelMessageHandler + Send + Sync,
RMH::Target: RoutingMessageHandler + Send + Sync,
+ OMH::Target: OnionMessageHandler + Send + Sync,
L::Target: Logger + Send + Sync,
UMH::Target: CustomMessageHandler + Send + Sync,
{
/// The returned future will complete when the peer is disconnected and associated handling
/// futures are freed. However, because all processing futures are spawned with tokio::spawn, you
/// do not need to poll the provided future in order to make progress.
-pub fn setup_outbound<CMH, RMH, L, UMH>(peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor, CMH, RMH, L, UMH>>, their_node_id: PublicKey, stream: StdTcpStream) -> impl std::future::Future<Output=()> where
+pub fn setup_outbound<CMH, RMH, OMH, L, UMH>(peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor, CMH, RMH, OMH, L, UMH>>, their_node_id: PublicKey, stream: StdTcpStream) -> impl std::future::Future<Output=()> where
CMH: Deref + 'static + Send + Sync,
RMH: Deref + 'static + Send + Sync,
+ OMH: Deref + 'static + Send + Sync,
L: Deref + 'static + Send + Sync,
UMH: Deref + 'static + Send + Sync,
CMH::Target: ChannelMessageHandler + Send + Sync,
RMH::Target: RoutingMessageHandler + Send + Sync,
+ OMH::Target: OnionMessageHandler + Send + Sync,
L::Target: Logger + Send + Sync,
UMH::Target: CustomMessageHandler + Send + Sync,
{
/// disconnected and associated handling futures are freed. However, because all processing in
/// said futures is spawned with tokio::spawn, you do not need to poll the second future in order
/// to make progress.
-pub async fn connect_outbound<CMH, RMH, L, UMH>(peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor, CMH, RMH, L, UMH>>, their_node_id: PublicKey, addr: SocketAddr) -> Option<impl std::future::Future<Output=()>> where
+pub async fn connect_outbound<CMH, RMH, OMH, L, UMH>(peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor, CMH, RMH, OMH, L, UMH>>, their_node_id: PublicKey, addr: SocketAddr) -> Option<impl std::future::Future<Output=()>> where
CMH: Deref + 'static + Send + Sync,
RMH: Deref + 'static + Send + Sync,
+ OMH: Deref + 'static + Send + Sync,
L: Deref + 'static + Send + Sync,
UMH: Deref + 'static + Send + Sync,
CMH::Target: ChannelMessageHandler + Send + Sync,
RMH::Target: RoutingMessageHandler + Send + Sync,
+ OMH::Target: OnionMessageHandler + Send + Sync,
L::Target: Logger + Send + Sync,
UMH::Target: CustomMessageHandler + Send + Sync,
{
fn handle_node_announcement(&self, _msg: &NodeAnnouncement) -> Result<bool, LightningError> { Ok(false) }
fn handle_channel_announcement(&self, _msg: &ChannelAnnouncement) -> Result<bool, LightningError> { Ok(false) }
fn handle_channel_update(&self, _msg: &ChannelUpdate) -> Result<bool, LightningError> { Ok(false) }
- fn get_next_channel_announcements(&self, _starting_point: u64, _batch_amount: u8) -> Vec<(ChannelAnnouncement, Option<ChannelUpdate>, Option<ChannelUpdate>)> { Vec::new() }
- fn get_next_node_announcements(&self, _starting_point: Option<&PublicKey>, _batch_amount: u8) -> Vec<NodeAnnouncement> { Vec::new() }
+ fn get_next_channel_announcement(&self, _starting_point: u64) -> Option<(ChannelAnnouncement, Option<ChannelUpdate>, Option<ChannelUpdate>)> { None }
+ fn get_next_node_announcement(&self, _starting_point: Option<&PublicKey>) -> Option<NodeAnnouncement> { None }
fn peer_connected(&self, _their_node_id: &PublicKey, _init_msg: &Init) { }
fn handle_reply_channel_range(&self, _their_node_id: &PublicKey, _msg: ReplyChannelRange) -> Result<(), LightningError> { Ok(()) }
fn handle_reply_short_channel_ids_end(&self, _their_node_id: &PublicKey, _msg: ReplyShortChannelIdsEnd) -> Result<(), LightningError> { Ok(()) }
let a_manager = Arc::new(PeerManager::new(MessageHandler {
chan_handler: Arc::clone(&a_handler),
route_handler: Arc::clone(&a_handler),
+ onion_message_handler: Arc::new(lightning::ln::peer_handler::IgnoringMessageHandler{}),
}, a_key.clone(), &[1; 32], Arc::new(TestLogger()), Arc::new(lightning::ln::peer_handler::IgnoringMessageHandler{})));
let (b_connected_sender, mut b_connected) = mpsc::channel(1);
let b_manager = Arc::new(PeerManager::new(MessageHandler {
chan_handler: Arc::clone(&b_handler),
route_handler: Arc::clone(&b_handler),
+ onion_message_handler: Arc::new(lightning::ln::peer_handler::IgnoringMessageHandler{}),
}, b_key.clone(), &[2; 32], Arc::new(TestLogger()), Arc::new(lightning::ln::peer_handler::IgnoringMessageHandler{})));
// We bind on localhost, hoping the environment is properly configured with a local
let a_manager = Arc::new(PeerManager::new(MessageHandler {
chan_handler: Arc::new(lightning::ln::peer_handler::ErroringMessageHandler::new()),
+ onion_message_handler: Arc::new(lightning::ln::peer_handler::IgnoringMessageHandler{}),
route_handler: Arc::new(lightning::ln::peer_handler::IgnoringMessageHandler{}),
}, a_key, &[1; 32], Arc::new(TestLogger()), Arc::new(lightning::ln::peer_handler::IgnoringMessageHandler{})));
_bench_unstable = ["lightning/_bench_unstable"]
[dependencies]
-bitcoin = "0.28.1"
+bitcoin = "0.29.0"
lightning = { version = "0.0.110", path = "../lightning" }
libc = "0.2"
//! Utilities that handle persisting Rust-Lightning data to disk via standard filesystem APIs.
+// Prefix these with `rustdoc::` when we update our MSRV to be >= 1.52 to remove warnings.
#![deny(broken_intra_doc_links)]
+#![deny(private_intra_doc_links)]
+
#![deny(missing_docs)]
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
use crate::FilesystemPersister;
use bitcoin::blockdata::block::{Block, BlockHeader};
use bitcoin::hashes::hex::FromHex;
- use bitcoin::Txid;
+ use bitcoin::{Txid, TxMerkleNode};
use lightning::chain::ChannelMonitorUpdateErr;
use lightning::chain::chainmonitor::Persist;
use lightning::chain::transaction::OutPoint;
use lightning::util::events::{ClosureReason, MessageSendEventsProvider};
use lightning::util::test_utils;
use std::fs;
+ use bitcoin::hashes::Hash;
#[cfg(target_os = "windows")]
use {
lightning::get_event_msg,
let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
assert_eq!(node_txn.len(), 1);
- let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[0].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+ let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[0].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
connect_block(&nodes[1], &Block { header, txdata: vec![node_txn[0].clone(), node_txn[0].clone()]});
check_closed_broadcast!(nodes[1], true);
check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
[dependencies]
lightning = { version = "0.0.110", path = "../lightning" }
-bitcoin = { version = "0.28.1", default-features = false }
+bitcoin = { version = "0.29.0", default-features = false }
[dev-dependencies]
lightning = { version = "0.0.110", path = "../lightning", features = ["_test_utils"] }
+// Prefix these with `rustdoc::` when we update our MSRV to be >= 1.52 to remove warnings.
+#![deny(broken_intra_doc_links)]
+#![deny(private_intra_doc_links)]
+
#![deny(missing_docs)]
#![deny(unsafe_code)]
-#![deny(broken_intra_doc_links)]
#![deny(non_upper_case_globals)]
#![deny(non_camel_case_types)]
#![deny(non_snake_case)]
default = ["std", "grind_signatures"]
[dependencies]
-bitcoin = { version = "0.28.1", default-features = false, features = ["secp-recovery"] }
+bitcoin = { version = "0.29.0", default-features = false, features = ["secp-recovery"] }
hashbrown = { version = "0.11", optional = true }
hex = { version = "0.4", optional = true }
regex = "1.5.6"
[dev-dependencies.bitcoin]
-version = "0.28.1"
+version = "0.29.0"
default-features = false
features = ["bitcoinconsensus", "secp-recovery"]
where
FN: Fn(&ChannelMonitor<ChannelSigner>, &TransactionData) -> Vec<TransactionOutputs>
{
- let mut dependent_txdata = Vec::new();
- {
- let monitor_states = self.monitors.write().unwrap();
- if let Some(height) = best_height {
- // If the best block height is being updated, update highest_chain_height under the
- // monitors write lock.
- let old_height = self.highest_chain_height.load(Ordering::Acquire);
- let new_height = height as usize;
- if new_height > old_height {
- self.highest_chain_height.store(new_height, Ordering::Release);
- }
+ let monitor_states = self.monitors.write().unwrap();
+ if let Some(height) = best_height {
+ // If the best block height is being updated, update highest_chain_height under the
+ // monitors write lock.
+ let old_height = self.highest_chain_height.load(Ordering::Acquire);
+ let new_height = height as usize;
+ if new_height > old_height {
+ self.highest_chain_height.store(new_height, Ordering::Release);
}
+ }
- for (funding_outpoint, monitor_state) in monitor_states.iter() {
- let monitor = &monitor_state.monitor;
- let mut txn_outputs;
- {
- txn_outputs = process(monitor, txdata);
- let update_id = MonitorUpdateId {
- contents: UpdateOrigin::ChainSync(self.sync_persistence_id.get_increment()),
- };
- let mut pending_monitor_updates = monitor_state.pending_monitor_updates.lock().unwrap();
- if let Some(height) = best_height {
- if !monitor_state.has_pending_chainsync_updates(&pending_monitor_updates) {
- // If there are not ChainSync persists awaiting completion, go ahead and
- // set last_chain_persist_height here - we wouldn't want the first
- // TemporaryFailure to always immediately be considered "overly delayed".
- monitor_state.last_chain_persist_height.store(height as usize, Ordering::Release);
- }
+ for (funding_outpoint, monitor_state) in monitor_states.iter() {
+ let monitor = &monitor_state.monitor;
+ let mut txn_outputs;
+ {
+ txn_outputs = process(monitor, txdata);
+ let update_id = MonitorUpdateId {
+ contents: UpdateOrigin::ChainSync(self.sync_persistence_id.get_increment()),
+ };
+ let mut pending_monitor_updates = monitor_state.pending_monitor_updates.lock().unwrap();
+ if let Some(height) = best_height {
+ if !monitor_state.has_pending_chainsync_updates(&pending_monitor_updates) {
+					// If there are no ChainSync persists awaiting completion, go ahead and
+ // set last_chain_persist_height here - we wouldn't want the first
+ // TemporaryFailure to always immediately be considered "overly delayed".
+ monitor_state.last_chain_persist_height.store(height as usize, Ordering::Release);
}
+ }
- log_trace!(self.logger, "Syncing Channel Monitor for channel {}", log_funding_info!(monitor));
- match self.persister.update_persisted_channel(*funding_outpoint, &None, monitor, update_id) {
- Ok(()) =>
- log_trace!(self.logger, "Finished syncing Channel Monitor for channel {}", log_funding_info!(monitor)),
- Err(ChannelMonitorUpdateErr::PermanentFailure) => {
- monitor_state.channel_perm_failed.store(true, Ordering::Release);
- self.pending_monitor_events.lock().unwrap().push((*funding_outpoint, vec![MonitorEvent::UpdateFailed(*funding_outpoint)], monitor.get_counterparty_node_id()));
- },
- Err(ChannelMonitorUpdateErr::TemporaryFailure) => {
- log_debug!(self.logger, "Channel Monitor sync for channel {} in progress, holding events until completion!", log_funding_info!(monitor));
- pending_monitor_updates.push(update_id);
- },
- }
+ log_trace!(self.logger, "Syncing Channel Monitor for channel {}", log_funding_info!(monitor));
+ match self.persister.update_persisted_channel(*funding_outpoint, &None, monitor, update_id) {
+ Ok(()) =>
+ log_trace!(self.logger, "Finished syncing Channel Monitor for channel {}", log_funding_info!(monitor)),
+ Err(ChannelMonitorUpdateErr::PermanentFailure) => {
+ monitor_state.channel_perm_failed.store(true, Ordering::Release);
+ self.pending_monitor_events.lock().unwrap().push((*funding_outpoint, vec![MonitorEvent::UpdateFailed(*funding_outpoint)], monitor.get_counterparty_node_id()));
+ },
+ Err(ChannelMonitorUpdateErr::TemporaryFailure) => {
+ log_debug!(self.logger, "Channel Monitor sync for channel {} in progress, holding events until completion!", log_funding_info!(monitor));
+ pending_monitor_updates.push(update_id);
+ },
}
+ }
- // Register any new outputs with the chain source for filtering, storing any dependent
- // transactions from within the block that previously had not been included in txdata.
- if let Some(ref chain_source) = self.chain_source {
- let block_hash = header.block_hash();
- for (txid, mut outputs) in txn_outputs.drain(..) {
- for (idx, output) in outputs.drain(..) {
- // Register any new outputs with the chain source for filtering and recurse
- // if it indicates that there are dependent transactions within the block
- // that had not been previously included in txdata.
- let output = WatchedOutput {
- block_hash: Some(block_hash),
- outpoint: OutPoint { txid, index: idx as u16 },
- script_pubkey: output.script_pubkey,
- };
- if let Some(tx) = chain_source.register_output(output) {
- dependent_txdata.push(tx);
- }
- }
+			// Register any new outputs with the chain source for filtering.
+ if let Some(ref chain_source) = self.chain_source {
+ let block_hash = header.block_hash();
+ for (txid, mut outputs) in txn_outputs.drain(..) {
+ for (idx, output) in outputs.drain(..) {
+ // Register any new outputs with the chain source for filtering
+ let output = WatchedOutput {
+ block_hash: Some(block_hash),
+ outpoint: OutPoint { txid, index: idx as u16 },
+ script_pubkey: output.script_pubkey,
+ };
+ chain_source.register_output(output)
}
}
}
}
-
- // Recursively call for any dependent transactions that were identified by the chain source.
- if !dependent_txdata.is_empty() {
- dependent_txdata.sort_unstable_by_key(|(index, _tx)| *index);
- dependent_txdata.dedup_by_key(|(index, _tx)| *index);
- let txdata: Vec<_> = dependent_txdata.iter().map(|(index, tx)| (*index, tx)).collect();
- self.process_chain_data(header, None, &txdata, process); // We skip the best height the second go-around
- }
}
/// Creates a new `ChainMonitor` used to watch on-chain activity pertaining to channels.
#[cfg(test)]
mod tests {
- use bitcoin::BlockHeader;
+ use bitcoin::{BlockHeader, TxMerkleNode};
+ use bitcoin::hashes::Hash;
use ::{check_added_monitors, check_closed_broadcast, check_closed_event};
use ::{expect_payment_sent, expect_payment_claimed, expect_payment_sent_without_paths, expect_payment_path_successful, get_event_msg};
use ::{get_htlc_update_msgs, get_local_commitment_txn, get_revoke_commit_msgs, get_route_and_payment_hash, unwrap_send_err};
use ln::msgs::ChannelMessageHandler;
use util::errors::APIError;
use util::events::{ClosureReason, MessageSendEvent, MessageSendEventsProvider};
- use util::test_utils::{OnRegisterOutput, TxOutReference};
-
- /// Tests that in-block dependent transactions are processed by `block_connected` when not
- /// included in `txdata` but returned by [`chain::Filter::register_output`]. For instance,
- /// a (non-anchor) commitment transaction's HTLC output may be spent in the same block as the
- /// commitment transaction itself. An Electrum client may filter the commitment transaction but
- /// needs to return the HTLC transaction so it can be processed.
- #[test]
- fn connect_block_checks_dependent_transactions() {
- let chanmon_cfgs = create_chanmon_cfgs(2);
- let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
- let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
- let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
- let channel = create_announced_chan_between_nodes(
- &nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
-
- // Send a payment, saving nodes[0]'s revoked commitment and HTLC-Timeout transactions.
- let (commitment_tx, htlc_tx) = {
- let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 5_000_000).0;
- let mut txn = get_local_commitment_txn!(nodes[0], channel.2);
- claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage);
-
- assert_eq!(txn.len(), 2);
- (txn.remove(0), txn.remove(0))
- };
-
- // Set expectations on nodes[1]'s chain source to return dependent transactions.
- let htlc_output = TxOutReference(commitment_tx.clone(), 0);
- let to_local_output = TxOutReference(commitment_tx.clone(), 1);
- let htlc_timeout_output = TxOutReference(htlc_tx.clone(), 0);
- nodes[1].chain_source
- .expect(OnRegisterOutput { with: htlc_output, returns: Some((1, htlc_tx)) })
- .expect(OnRegisterOutput { with: to_local_output, returns: None })
- .expect(OnRegisterOutput { with: htlc_timeout_output, returns: None });
-
- // Notify nodes[1] that nodes[0]'s revoked commitment transaction was mined. The chain
- // source should return the dependent HTLC transaction when the HTLC output is registered.
- mine_transaction(&nodes[1], &commitment_tx);
-
- // Clean up so uninteresting assertions don't fail.
- check_added_monitors!(nodes[1], 1);
- nodes[1].node.get_and_clear_pending_msg_events();
- nodes[1].node.get_and_clear_pending_events();
- }
#[test]
fn test_async_ooo_offchain_updates() {
let new_header = BlockHeader {
version: 2, time: 0, bits: 0, nonce: 0,
prev_blockhash: nodes[0].best_block_info().0,
- merkle_root: Default::default() };
+ merkle_root: TxMerkleNode::all_zeros() };
nodes[0].chain_monitor.chain_monitor.transactions_confirmed(&new_header,
&[(0, &remote_txn[0]), (1, &remote_txn[1])], nodes[0].best_block_info().1 + 1);
assert!(nodes[0].chain_monitor.release_pending_monitor_events().is_empty());
let latest_header = BlockHeader {
version: 2, time: 0, bits: 0, nonce: 0,
prev_blockhash: nodes[0].best_block_info().0,
- merkle_root: Default::default() };
+ merkle_root: TxMerkleNode::all_zeros() };
nodes[0].chain_monitor.chain_monitor.best_block_updated(&latest_header, nodes[0].best_block_info().1 + LATENCY_GRACE_PERIOD_BLOCKS);
} else {
let persistences = chanmon_cfgs[0].persister.chain_sync_monitor_persistences.lock().unwrap().clone();
use bitcoin::blockdata::block::BlockHeader;
use bitcoin::blockdata::transaction::{TxOut,Transaction};
+use bitcoin::blockdata::transaction::OutPoint as BitcoinOutPoint;
use bitcoin::blockdata::script::{Script, Builder};
use bitcoin::blockdata::opcodes;
use prelude::*;
use core::{cmp, mem};
use io::{self, Error};
+use core::convert::TryInto;
use core::ops::Deref;
use sync::Mutex;
-/// An update generated by the underlying Channel itself which contains some new information the
-/// ChannelMonitor should be made aware of.
+/// An update generated by the underlying channel itself which contains some new information the
+/// [`ChannelMonitor`] should be made aware of.
+///
+/// Because this represents only a small number of updates to the underlying state, it is generally
+/// much smaller than a full [`ChannelMonitor`]. However, for large single commitment transaction
+/// updates (e.g. ones during which there are hundreds of HTLCs pending on the commitment
+/// transaction), a single update may reach upwards of 1 MiB in serialized size.
#[cfg_attr(any(test, fuzzing, feature = "_test_utils"), derive(PartialEq))]
#[derive(Clone)]
#[must_use]
txid: Txid,
height: u32,
event: OnchainEvent,
+ transaction: Option<Transaction>, // Added as optional, but always filled in, in LDK 0.0.110
}
impl OnchainEventEntry {
}
}
+/// The (output index, sats value) for the counterparty's output in a commitment transaction.
+///
+/// This was added as an `Option` in 0.0.110.
+type CommitmentTxCounterpartyOutputInfo = Option<(u32, u64)>;
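+// For illustration (not from this patch): `Some((1, 1_000))` would record that output #1 of
+// the confirmed commitment transaction paid the counterparty 1,000 sats, while `None` means
+// this was never recorded (e.g. data written before 0.0.110).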
+
/// Upon discovering of some classes of onchain tx by ChannelMonitor, we may have to take actions on it
/// once they mature to enough confirmations (ANTI_REORG_DELAY)
#[derive(PartialEq)]
/// transaction which appeared on chain.
commitment_tx_output_idx: Option<u32>,
},
+ /// An output waiting on [`ANTI_REORG_DELAY`] confirmations before we hand the user the
+ /// [`SpendableOutputDescriptor`].
MaturingOutput {
descriptor: SpendableOutputDescriptor,
},
/// The CSV delay for the output of the funding spend transaction (implying it is a local
/// commitment transaction, and this is the delay on the to_self output).
on_local_output_csv: Option<u16>,
+ /// If the funding spend transaction was a known remote commitment transaction, we track
+ /// the output index and amount of the counterparty's `to_self` output here.
+ ///
+ /// This allows us to generate a [`Balance::CounterpartyRevokedOutputClaimable`] for the
+ /// counterparty output.
+ commitment_tx_to_counterparty_output: CommitmentTxCounterpartyOutputInfo,
},
/// A spend of a commitment transaction HTLC output, set in the cases where *no* `HTLCUpdate`
/// is constructed. This is used when
/// * an inbound HTLC is claimed by us (with a preimage).
/// * a revoked-state HTLC transaction was broadcasted, which was claimed by the revocation
/// signature.
+	/// * a revoked-state HTLC transaction was broadcasted, which was claimed by an
+	///   HTLC-Success/HTLC-Timeout transaction (and is still claimable with a revocation
+	///   signature).
HTLCSpendConfirmation {
commitment_tx_output_idx: u32,
/// If the claim was made by either party with a preimage, this is filled in
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
write_tlv_fields!(writer, {
(0, self.txid, required),
+ (1, self.transaction, option),
(2, self.height, required),
(4, self.event, required),
});
impl MaybeReadable for OnchainEventEntry {
fn read<R: io::Read>(reader: &mut R) -> Result<Option<Self>, DecodeError> {
- let mut txid = Default::default();
+ let mut txid = Txid::all_zeros();
+ let mut transaction = None;
let mut height = 0;
let mut event = None;
read_tlv_fields!(reader, {
(0, txid, required),
+ (1, transaction, option),
(2, height, required),
(4, event, ignorable),
});
if let Some(ev) = event {
- Ok(Some(Self { txid, height, event: ev }))
+ Ok(Some(Self { txid, transaction, height, event: ev }))
} else {
Ok(None)
}
},
(3, FundingSpendConfirmation) => {
(0, on_local_output_csv, option),
+ (1, commitment_tx_to_counterparty_output, option),
},
(5, HTLCSpendConfirmation) => {
(0, commitment_tx_output_idx, required),
/// HTLCs which we sent to our counterparty which are claimable after a timeout (less on-chain
/// fees) if the counterparty does not know the preimage for the HTLCs. These are somewhat
/// likely to be claimed by our counterparty before we do.
- MaybeClaimableHTLCAwaitingTimeout {
- /// The amount available to claim, in satoshis, excluding the on-chain fees which will be
- /// required to do so.
+ MaybeTimeoutClaimableHTLC {
+ /// The amount potentially available to claim, in satoshis, excluding the on-chain fees
+ /// which will be required to do so.
claimable_amount_satoshis: u64,
/// The height at which we will be able to claim the balance if our counterparty has not
/// done so.
claimable_height: u32,
},
+	/// HTLCs which we received from our counterparty which are claimable with a preimage that we
+	/// do not currently have. This will only be claimable if we receive the preimage from the node
+	/// to which we forwarded this HTLC before the timeout.
+ MaybePreimageClaimableHTLC {
+ /// The amount potentially available to claim, in satoshis, excluding the on-chain fees
+ /// which will be required to do so.
+ claimable_amount_satoshis: u64,
+ /// The height at which our counterparty will be able to claim the balance if we have not
+ /// yet received the preimage and claimed it ourselves.
+ expiry_height: u32,
+ },
+ /// The channel has been closed, and our counterparty broadcasted a revoked commitment
+ /// transaction.
+ ///
+ /// Thus, we're able to claim all outputs in the commitment transaction, one of which has the
+ /// following amount.
+ CounterpartyRevokedOutputClaimable {
+ /// The amount, in satoshis, of the output which we can claim.
+ ///
+		/// Note that for outputs from HTLC balances this may exclude some on-chain fees that
+		/// were already spent.
+ claimable_amount_satoshis: u64,
+ },
}
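+// A minimal sketch (not part of this patch) of folding the new variants into a single
+// "total claimable" figure; `monitor` is assumed to be a `ChannelMonitor` handle:
+//
+// let total_sats: u64 = monitor.get_claimable_balances().iter().map(|b| match b {
+//     Balance::MaybeTimeoutClaimableHTLC { claimable_amount_satoshis, .. } |
+//     Balance::MaybePreimageClaimableHTLC { claimable_amount_satoshis, .. } |
+//     Balance::CounterpartyRevokedOutputClaimable { claimable_amount_satoshis } =>
+//         *claimable_amount_satoshis,
+//     _ => 0,
+// }).sum();
+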
/// An HTLC which has been irrevocably resolved on-chain, and has reached ANTI_REORG_DELAY.
#[derive(PartialEq)]
struct IrrevocablyResolvedHTLC {
commitment_tx_output_idx: u32,
+	/// The txid of the transaction which resolved the HTLC: this may be the commitment transaction
+	/// itself (if the HTLC was not present in the confirmed commitment transaction), an
+	/// HTLC-Success, or an HTLC-Timeout transaction.
+ resolving_txid: Option<Txid>, // Added as optional, but always filled in, in 0.0.110
/// Only set if the HTLC claim was ours using a payment preimage
payment_preimage: Option<PaymentPreimage>,
}
impl_writeable_tlv_based!(IrrevocablyResolvedHTLC, {
(0, commitment_tx_output_idx, required),
+ (1, resolving_txid, option),
(2, payment_preimage, option),
});
funding_spend_seen: bool,
funding_spend_confirmed: Option<Txid>,
+ confirmed_commitment_tx_counterparty_output: CommitmentTxCounterpartyOutputInfo,
/// The set of HTLCs which have been either claimed or failed on chain and have reached
/// the requisite confirmations on the claim/fail transaction (either ANTI_REORG_DELAY or the
/// spending CSV for revocable outputs).
self.holder_tx_signed != other.holder_tx_signed ||
self.funding_spend_seen != other.funding_spend_seen ||
self.funding_spend_confirmed != other.funding_spend_confirmed ||
+ self.confirmed_commitment_tx_counterparty_output != other.confirmed_commitment_tx_counterparty_output ||
self.htlcs_resolved_on_chain != other.htlcs_resolved_on_chain
{
false
(5, self.pending_monitor_events, vec_type),
(7, self.funding_spend_seen, required),
(9, self.counterparty_node_id, option),
+ (11, self.confirmed_commitment_tx_counterparty_output, option),
});
Ok(())
holder_tx_signed: false,
funding_spend_seen: false,
funding_spend_confirmed: None,
+ confirmed_commitment_tx_counterparty_output: None,
htlcs_resolved_on_chain: Vec::new(),
best_block,
pub fn current_best_block(&self) -> BestBlock {
self.inner.lock().unwrap().best_block.clone()
}
+}
+
+impl<Signer: Sign> ChannelMonitorImpl<Signer> {
+ /// Helper for get_claimable_balances which does the work for an individual HTLC, generating up
+ /// to one `Balance` for the HTLC.
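+	///
+	/// Roughly, in order: a maturing holder-delayed output yields `ClaimableAwaitingConfirmations`,
+	/// an already-resolved HTLC yields nothing, revoked counterparty commitments yield
+	/// `CounterpartyRevokedOutputClaimable`, and otherwise we report a timeout- or
+	/// preimage-claimable balance depending on the HTLC's direction.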
+ fn get_htlc_balance(&self, htlc: &HTLCOutputInCommitment, holder_commitment: bool,
+ counterparty_revoked_commitment: bool, confirmed_txid: Option<Txid>)
+ -> Option<Balance> {
+ let htlc_commitment_tx_output_idx =
+ if let Some(v) = htlc.transaction_output_index { v } else { return None; };
+
+ let mut htlc_spend_txid_opt = None;
+ let mut holder_timeout_spend_pending = None;
+ let mut htlc_spend_pending = None;
+ let mut holder_delayed_output_pending = None;
+ for event in self.onchain_events_awaiting_threshold_conf.iter() {
+ match event.event {
+ OnchainEvent::HTLCUpdate { commitment_tx_output_idx, htlc_value_satoshis, .. }
+ if commitment_tx_output_idx == Some(htlc_commitment_tx_output_idx) => {
+ debug_assert!(htlc_spend_txid_opt.is_none());
+ htlc_spend_txid_opt = event.transaction.as_ref().map(|tx| tx.txid());
+ debug_assert!(holder_timeout_spend_pending.is_none());
+ debug_assert_eq!(htlc_value_satoshis.unwrap(), htlc.amount_msat / 1000);
+ holder_timeout_spend_pending = Some(event.confirmation_threshold());
+ },
+ OnchainEvent::HTLCSpendConfirmation { commitment_tx_output_idx, preimage, .. }
+ if commitment_tx_output_idx == htlc_commitment_tx_output_idx => {
+ debug_assert!(htlc_spend_txid_opt.is_none());
+ htlc_spend_txid_opt = event.transaction.as_ref().map(|tx| tx.txid());
+ debug_assert!(htlc_spend_pending.is_none());
+ htlc_spend_pending = Some((event.confirmation_threshold(), preimage.is_some()));
+ },
+ OnchainEvent::MaturingOutput {
+ descriptor: SpendableOutputDescriptor::DelayedPaymentOutput(ref descriptor) }
+ if descriptor.outpoint.index as u32 == htlc_commitment_tx_output_idx => {
+ debug_assert!(holder_delayed_output_pending.is_none());
+ holder_delayed_output_pending = Some(event.confirmation_threshold());
+ },
+ _ => {},
+ }
+ }
+ let htlc_resolved = self.htlcs_resolved_on_chain.iter()
+ .find(|v| if v.commitment_tx_output_idx == htlc_commitment_tx_output_idx {
+ debug_assert!(htlc_spend_txid_opt.is_none());
+ htlc_spend_txid_opt = v.resolving_txid;
+ true
+ } else { false });
+ debug_assert!(holder_timeout_spend_pending.is_some() as u8 + htlc_spend_pending.is_some() as u8 + htlc_resolved.is_some() as u8 <= 1);
+
+ let htlc_output_to_spend =
+ if let Some(txid) = htlc_spend_txid_opt {
+ debug_assert!(
+ self.onchain_tx_handler.channel_transaction_parameters.opt_anchors.is_none(),
+ "This code needs updating for anchors");
+ BitcoinOutPoint::new(txid, 0)
+ } else {
+ BitcoinOutPoint::new(confirmed_txid.unwrap(), htlc_commitment_tx_output_idx)
+ };
+ let htlc_output_spend_pending = self.onchain_tx_handler.is_output_spend_pending(&htlc_output_to_spend);
+
+ if let Some(conf_thresh) = holder_delayed_output_pending {
+ debug_assert!(holder_commitment);
+ return Some(Balance::ClaimableAwaitingConfirmations {
+ claimable_amount_satoshis: htlc.amount_msat / 1000,
+ confirmation_height: conf_thresh,
+ });
+ } else if htlc_resolved.is_some() && !htlc_output_spend_pending {
+ // Funding transaction spends should be fully confirmed by the time any
+ // HTLC transactions are resolved, unless we're talking about a holder
+ // commitment tx, whose resolution is delayed until the CSV timeout is
+ // reached, even though HTLCs may be resolved after only
+ // ANTI_REORG_DELAY confirmations.
+ debug_assert!(holder_commitment || self.funding_spend_confirmed.is_some());
+ } else if counterparty_revoked_commitment {
+ let htlc_output_claim_pending = self.onchain_events_awaiting_threshold_conf.iter().find_map(|event| {
+ if let OnchainEvent::MaturingOutput {
+ descriptor: SpendableOutputDescriptor::StaticOutput { .. }
+ } = &event.event {
+ if event.transaction.as_ref().map(|tx| tx.input.iter().any(|inp| {
+ if let Some(htlc_spend_txid) = htlc_spend_txid_opt {
+						tx.txid() == htlc_spend_txid ||
+ inp.previous_output.txid == htlc_spend_txid
+ } else {
+ Some(inp.previous_output.txid) == confirmed_txid &&
+ inp.previous_output.vout == htlc_commitment_tx_output_idx
+ }
+ })).unwrap_or(false) {
+ Some(())
+ } else { None }
+ } else { None }
+ });
+ if htlc_output_claim_pending.is_some() {
+					// In the revoked counterparty commitment transaction case we already push a
+					// `Balance` onto `res` for every `StaticOutput` in a `MaturingOutput`, so we
+					// don't need to do so again here.
+ } else {
+ debug_assert!(holder_timeout_spend_pending.is_none(),
+ "HTLCUpdate OnchainEvents should never appear for preimage claims");
+ debug_assert!(!htlc.offered || htlc_spend_pending.is_none() || !htlc_spend_pending.unwrap().1,
+ "We don't (currently) generate preimage claims against revoked outputs, where did you get one?!");
+ return Some(Balance::CounterpartyRevokedOutputClaimable {
+ claimable_amount_satoshis: htlc.amount_msat / 1000,
+ });
+ }
+ } else if htlc.offered == holder_commitment {
+ // If the payment was outbound, check if there's an HTLCUpdate
+ // indicating we have spent this HTLC with a timeout, claiming it back
+ // and awaiting confirmations on it.
+ if let Some(conf_thresh) = holder_timeout_spend_pending {
+ return Some(Balance::ClaimableAwaitingConfirmations {
+ claimable_amount_satoshis: htlc.amount_msat / 1000,
+ confirmation_height: conf_thresh,
+ });
+ } else {
+ return Some(Balance::MaybeTimeoutClaimableHTLC {
+ claimable_amount_satoshis: htlc.amount_msat / 1000,
+ claimable_height: htlc.cltv_expiry,
+ });
+ }
+ } else if self.payment_preimages.get(&htlc.payment_hash).is_some() {
+ // Otherwise (the payment was inbound), only expose it as claimable if
+ // we know the preimage.
+ // Note that if there is a pending claim, but it did not use the
+ // preimage, we lost funds to our counterparty! We will then continue
+ // to show it as ContentiousClaimable until ANTI_REORG_DELAY.
+ debug_assert!(holder_timeout_spend_pending.is_none());
+ if let Some((conf_thresh, true)) = htlc_spend_pending {
+ return Some(Balance::ClaimableAwaitingConfirmations {
+ claimable_amount_satoshis: htlc.amount_msat / 1000,
+ confirmation_height: conf_thresh,
+ });
+ } else {
+ return Some(Balance::ContentiousClaimable {
+ claimable_amount_satoshis: htlc.amount_msat / 1000,
+ timeout_height: htlc.cltv_expiry,
+ });
+ }
+ } else if htlc_resolved.is_none() {
+ return Some(Balance::MaybePreimageClaimableHTLC {
+ claimable_amount_satoshis: htlc.amount_msat / 1000,
+ expiry_height: htlc.cltv_expiry,
+ });
+ }
+ None
+ }
+}
+impl<Signer: Sign> ChannelMonitor<Signer> {
/// Gets the balances in this channel which are either claimable by us if we were to
/// force-close the channel now or which are claimable on-chain (possibly awaiting
/// confirmation).
/// balance, or until our counterparty has claimed the balance and accrued several
/// confirmations on the claim transaction.
///
- /// Note that the balances available when you or your counterparty have broadcasted revoked
- /// state(s) may not be fully captured here.
- // TODO, fix that ^
+ /// Note that for `ChannelMonitors` which track a channel that went on-chain with versions of
+ /// LDK prior to 0.0.108, balances may not be fully captured if our counterparty broadcasted
+ /// a revoked state.
///
/// See [`Balance`] for additional details on the types of claimable balances which
/// may be returned here and their meanings.
let us = self.inner.lock().unwrap();
let mut confirmed_txid = us.funding_spend_confirmed;
+ let mut confirmed_counterparty_output = us.confirmed_commitment_tx_counterparty_output;
let mut pending_commitment_tx_conf_thresh = None;
let funding_spend_pending = us.onchain_events_awaiting_threshold_conf.iter().find_map(|event| {
- if let OnchainEvent::FundingSpendConfirmation { .. } = event.event {
+ if let OnchainEvent::FundingSpendConfirmation { commitment_tx_to_counterparty_output, .. } =
+ event.event
+ {
+ confirmed_counterparty_output = commitment_tx_to_counterparty_output;
Some((event.txid, event.confirmation_threshold()))
} else { None }
});
}
macro_rules! walk_htlcs {
- ($holder_commitment: expr, $htlc_iter: expr) => {
+ ($holder_commitment: expr, $counterparty_revoked_commitment: expr, $htlc_iter: expr) => {
for htlc in $htlc_iter {
- if let Some(htlc_commitment_tx_output_idx) = htlc.transaction_output_index {
- if let Some(conf_thresh) = us.onchain_events_awaiting_threshold_conf.iter().find_map(|event| {
- if let OnchainEvent::MaturingOutput { descriptor: SpendableOutputDescriptor::DelayedPaymentOutput(descriptor) } = &event.event {
- if descriptor.outpoint.index as u32 == htlc_commitment_tx_output_idx { Some(event.confirmation_threshold()) } else { None }
- } else { None }
- }) {
- debug_assert!($holder_commitment);
- res.push(Balance::ClaimableAwaitingConfirmations {
- claimable_amount_satoshis: htlc.amount_msat / 1000,
- confirmation_height: conf_thresh,
- });
- } else if us.htlcs_resolved_on_chain.iter().any(|v| v.commitment_tx_output_idx == htlc_commitment_tx_output_idx) {
- // Funding transaction spends should be fully confirmed by the time any
- // HTLC transactions are resolved, unless we're talking about a holder
- // commitment tx, whose resolution is delayed until the CSV timeout is
- // reached, even though HTLCs may be resolved after only
- // ANTI_REORG_DELAY confirmations.
- debug_assert!($holder_commitment || us.funding_spend_confirmed.is_some());
- } else if htlc.offered == $holder_commitment {
- // If the payment was outbound, check if there's an HTLCUpdate
- // indicating we have spent this HTLC with a timeout, claiming it back
- // and awaiting confirmations on it.
- let htlc_update_pending = us.onchain_events_awaiting_threshold_conf.iter().find_map(|event| {
- if let OnchainEvent::HTLCUpdate { commitment_tx_output_idx: Some(commitment_tx_output_idx), .. } = event.event {
- if commitment_tx_output_idx == htlc_commitment_tx_output_idx {
- Some(event.confirmation_threshold()) } else { None }
- } else { None }
- });
- if let Some(conf_thresh) = htlc_update_pending {
- res.push(Balance::ClaimableAwaitingConfirmations {
- claimable_amount_satoshis: htlc.amount_msat / 1000,
- confirmation_height: conf_thresh,
- });
- } else {
- res.push(Balance::MaybeClaimableHTLCAwaitingTimeout {
- claimable_amount_satoshis: htlc.amount_msat / 1000,
- claimable_height: htlc.cltv_expiry,
- });
- }
- } else if us.payment_preimages.get(&htlc.payment_hash).is_some() {
- // Otherwise (the payment was inbound), only expose it as claimable if
- // we know the preimage.
- // Note that if there is a pending claim, but it did not use the
- // preimage, we lost funds to our counterparty! We will then continue
- // to show it as ContentiousClaimable until ANTI_REORG_DELAY.
- let htlc_spend_pending = us.onchain_events_awaiting_threshold_conf.iter().find_map(|event| {
- if let OnchainEvent::HTLCSpendConfirmation { commitment_tx_output_idx, preimage, .. } = event.event {
- if commitment_tx_output_idx == htlc_commitment_tx_output_idx {
- Some((event.confirmation_threshold(), preimage.is_some()))
- } else { None }
- } else { None }
- });
- if let Some((conf_thresh, true)) = htlc_spend_pending {
- res.push(Balance::ClaimableAwaitingConfirmations {
- claimable_amount_satoshis: htlc.amount_msat / 1000,
- confirmation_height: conf_thresh,
- });
- } else {
- res.push(Balance::ContentiousClaimable {
- claimable_amount_satoshis: htlc.amount_msat / 1000,
- timeout_height: htlc.cltv_expiry,
- });
- }
+ if htlc.transaction_output_index.is_some() {
+ if let Some(bal) = us.get_htlc_balance(htlc, $holder_commitment, $counterparty_revoked_commitment, confirmed_txid) {
+ res.push(bal);
}
}
}
if let Some(txid) = confirmed_txid {
let mut found_commitment_tx = false;
- if Some(txid) == us.current_counterparty_commitment_txid || Some(txid) == us.prev_counterparty_commitment_txid {
- walk_htlcs!(false, us.counterparty_claimable_outpoints.get(&txid).unwrap().iter().map(|(a, _)| a));
+ if let Some(counterparty_tx_htlcs) = us.counterparty_claimable_outpoints.get(&txid) {
+ // First look for the to_remote output back to us.
if let Some(conf_thresh) = pending_commitment_tx_conf_thresh {
if let Some(value) = us.onchain_events_awaiting_threshold_conf.iter().find_map(|event| {
if let OnchainEvent::MaturingOutput {
// confirmation with the same height or have never met our dust amount.
}
}
+ if Some(txid) == us.current_counterparty_commitment_txid || Some(txid) == us.prev_counterparty_commitment_txid {
+ walk_htlcs!(false, false, counterparty_tx_htlcs.iter().map(|(a, _)| a));
+ } else {
+ walk_htlcs!(false, true, counterparty_tx_htlcs.iter().map(|(a, _)| a));
+ // The counterparty broadcasted a revoked state!
+ // Look for any StaticOutputs first, generating claimable balances for those.
+ // If any match the confirmed counterparty revoked to_self output, skip
+ // generating a CounterpartyRevokedOutputClaimable.
+ let mut spent_counterparty_output = false;
+ for event in us.onchain_events_awaiting_threshold_conf.iter() {
+ if let OnchainEvent::MaturingOutput {
+ descriptor: SpendableOutputDescriptor::StaticOutput { output, .. }
+ } = &event.event {
+ res.push(Balance::ClaimableAwaitingConfirmations {
+ claimable_amount_satoshis: output.value,
+ confirmation_height: event.confirmation_threshold(),
+ });
+ if let Some(confirmed_to_self_idx) = confirmed_counterparty_output.map(|(idx, _)| idx) {
+ if event.transaction.as_ref().map(|tx|
+ tx.input.iter().any(|inp| inp.previous_output.vout == confirmed_to_self_idx)
+ ).unwrap_or(false) {
+ spent_counterparty_output = true;
+ }
+ }
+ }
+ }
+
+ if spent_counterparty_output {
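+ // We already pushed a `ClaimableAwaitingConfirmations` balance for this
+ // output in the loop above, so there's nothing more to surface here.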
+ } else if let Some((confirmed_to_self_idx, amt)) = confirmed_counterparty_output {
+ let output_spendable = us.onchain_tx_handler
+ .is_output_spend_pending(&BitcoinOutPoint::new(txid, confirmed_to_self_idx));
+ if output_spendable {
+ res.push(Balance::CounterpartyRevokedOutputClaimable {
+ claimable_amount_satoshis: amt,
+ });
+ }
+ } else {
+ // The counterparty output is missing: either the commitment transaction
+ // was broadcast with a previous version of LDK (which didn't track it),
+ // or the counterparty's to_self output didn't meet the dust threshold.
+ }
+ }
found_commitment_tx = true;
} else if txid == us.current_holder_commitment_tx.txid {
- walk_htlcs!(true, us.current_holder_commitment_tx.htlc_outputs.iter().map(|(a, _, _)| a));
+ walk_htlcs!(true, false, us.current_holder_commitment_tx.htlc_outputs.iter().map(|(a, _, _)| a));
if let Some(conf_thresh) = pending_commitment_tx_conf_thresh {
res.push(Balance::ClaimableAwaitingConfirmations {
claimable_amount_satoshis: us.current_holder_commitment_tx.to_self_value_sat,
found_commitment_tx = true;
} else if let Some(prev_commitment) = &us.prev_holder_signed_commitment_tx {
if txid == prev_commitment.txid {
- walk_htlcs!(true, prev_commitment.htlc_outputs.iter().map(|(a, _, _)| a));
+ walk_htlcs!(true, false, prev_commitment.htlc_outputs.iter().map(|(a, _, _)| a));
if let Some(conf_thresh) = pending_commitment_tx_conf_thresh {
res.push(Balance::ClaimableAwaitingConfirmations {
claimable_amount_satoshis: prev_commitment.to_self_value_sat,
});
}
}
- // TODO: Add logic to provide claimable balances for counterparty broadcasting revoked
- // outputs.
} else {
let mut claimable_inbound_htlc_value_sat = 0;
for (htlc, _, _) in us.current_holder_commitment_tx.htlc_outputs.iter() {
if htlc.transaction_output_index.is_none() { continue; }
if htlc.offered {
- res.push(Balance::MaybeClaimableHTLCAwaitingTimeout {
+ res.push(Balance::MaybeTimeoutClaimableHTLC {
claimable_amount_satoshis: htlc.amount_msat / 1000,
claimable_height: htlc.cltv_expiry,
});
} else if us.payment_preimages.get(&htlc.payment_hash).is_some() {
claimable_inbound_htlc_value_sat += htlc.amount_msat / 1000;
+ } else {
+ // As long as the HTLC is still in our latest commitment state, treat
+ // it as potentially claimable, even if it has long-since expired.
+ res.push(Balance::MaybePreimageClaimableHTLC {
+ claimable_amount_satoshis: htlc.amount_msat / 1000,
+ expiry_height: htlc.cltv_expiry,
+ });
}
}
res.push(Balance::ClaimableOnChannelClose {
/// as long as we examine both the current counterparty commitment transaction and, if it hasn't
/// been revoked yet, the previous one, we will never "forget" to resolve an HTLC.
macro_rules! fail_unbroadcast_htlcs {
- ($self: expr, $commitment_tx_type: expr, $commitment_txid_confirmed: expr,
+ ($self: expr, $commitment_tx_type: expr, $commitment_txid_confirmed: expr, $commitment_tx_confirmed: expr,
$commitment_tx_conf_height: expr, $confirmed_htlcs_list: expr, $logger: expr) => { {
+ debug_assert_eq!($commitment_tx_confirmed.txid(), $commitment_txid_confirmed);
+
macro_rules! check_htlc_fails {
($txid: expr, $commitment_tx: expr) => {
if let Some(ref latest_outpoints) = $self.counterparty_claimable_outpoints.get($txid) {
});
let entry = OnchainEventEntry {
txid: $commitment_txid_confirmed,
+ transaction: Some($commitment_tx_confirmed.clone()),
height: $commitment_tx_conf_height,
event: OnchainEvent::HTLCUpdate {
source: (**source).clone(),
#[cfg(test)]
pub fn deliberately_bogus_accepted_htlc_witness_program() -> Vec<u8> {
- let mut ret = [opcodes::all::OP_NOP.into_u8(); 136];
- ret[131] = opcodes::all::OP_DROP.into_u8();
- ret[132] = opcodes::all::OP_DROP.into_u8();
- ret[133] = opcodes::all::OP_DROP.into_u8();
- ret[134] = opcodes::all::OP_DROP.into_u8();
- ret[135] = opcodes::OP_TRUE.into_u8();
+ let mut ret = [opcodes::all::OP_NOP.to_u8(); 136];
+ ret[131] = opcodes::all::OP_DROP.to_u8();
+ ret[132] = opcodes::all::OP_DROP.to_u8();
+ ret[133] = opcodes::all::OP_DROP.to_u8();
+ ret[134] = opcodes::all::OP_DROP.to_u8();
+ ret[135] = opcodes::OP_TRUE.to_u8();
Vec::from(&ret[..])
}
// First check if a counterparty commitment transaction has been broadcasted:
macro_rules! claim_htlcs {
($commitment_number: expr, $txid: expr) => {
- let htlc_claim_reqs = self.get_counterparty_htlc_output_claim_reqs($commitment_number, $txid, None);
+ let (htlc_claim_reqs, _) = self.get_counterparty_output_claim_info($commitment_number, $txid, None);
self.onchain_tx_handler.update_claims_view(&Vec::new(), htlc_claim_reqs, self.best_block.height(), self.best_block.height(), broadcaster, fee_estimator, logger);
}
}
/// data in counterparty_claimable_outpoints. Will directly claim any HTLC outputs which expire at a
/// height > height + CLTV_SHARED_CLAIM_BUFFER. In any case, will install monitoring for
/// HTLC-Success/HTLC-Timeout transactions.
- /// Return updates for HTLC pending in the channel and failed automatically by the broadcast of
- /// revoked counterparty commitment tx
- fn check_spend_counterparty_transaction<L: Deref>(&mut self, tx: &Transaction, height: u32, logger: &L) -> (Vec<PackageTemplate>, TransactionOutputs) where L::Target: Logger {
+ ///
+ /// Returns packages to claim the revoked output(s), as well as additional outputs to watch
+ /// and general information about the counterparty's to_self output in the commitment
+ /// transaction.
+ fn check_spend_counterparty_transaction<L: Deref>(&mut self, tx: &Transaction, height: u32, logger: &L)
+ -> (Vec<PackageTemplate>, TransactionOutputs, CommitmentTxCounterpartyOutputInfo)
+ where L::Target: Logger {
// Most secp and related errors trying to create keys means we have no hope of constructing
// a spend transaction...so we return no transactions to broadcast
let mut claimable_outpoints = Vec::new();
let mut watch_outputs = Vec::new();
+ let mut to_counterparty_output_info = None;
let commitment_txid = tx.txid(); //TODO: This is gonna be a performance bottleneck for watchtowers!
let per_commitment_option = self.counterparty_claimable_outpoints.get(&commitment_txid);
( $thing : expr ) => {
match $thing {
Ok(a) => a,
- Err(_) => return (claimable_outpoints, (commitment_txid, watch_outputs))
+ Err(_) => return (claimable_outpoints, (commitment_txid, watch_outputs), to_counterparty_output_info)
}
};
}
- let commitment_number = 0xffffffffffff - ((((tx.input[0].sequence as u64 & 0xffffff) << 3*8) | (tx.lock_time as u64 & 0xffffff)) ^ self.commitment_transaction_number_obscure_factor);
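+ // Per BOLT #3, the 48-bit commitment number is obscured by XORing it with a factor
+ // derived from both parties' payment basepoints, and is split across the transaction:
+ // the upper 24 bits live in the input's nSequence, the lower 24 bits in nLockTime.
+ // Recovery sketch (LDK counts commitment numbers down from 2^48 - 1):
+ //   obscured = ((sequence & 0xffffff) << 24) | (lock_time & 0xffffff)
+ //   commitment_number = 0xffff_ffff_ffff - (obscured ^ obscure_factor)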
+ let commitment_number = 0xffffffffffff - ((((tx.input[0].sequence.0 as u64 & 0xffffff) << 3*8) | (tx.lock_time.0 as u64 & 0xffffff)) ^ self.commitment_transaction_number_obscure_factor);
if commitment_number >= self.get_min_seen_secret() {
let secret = self.get_secret(commitment_number).unwrap();
let per_commitment_key = ignore_error!(SecretKey::from_slice(&secret));
let revk_outp = RevokedOutput::build(per_commitment_point, self.counterparty_commitment_params.counterparty_delayed_payment_base_key, self.counterparty_commitment_params.counterparty_htlc_base_key, per_commitment_key, outp.value, self.counterparty_commitment_params.on_counterparty_tx_csv);
let justice_package = PackageTemplate::build_package(commitment_txid, idx as u32, PackageSolvingData::RevokedOutput(revk_outp), height + self.counterparty_commitment_params.on_counterparty_tx_csv as u32, true, height);
claimable_outpoints.push(justice_package);
+ to_counterparty_output_info =
+ Some((idx.try_into().expect("Txn can't have more than 2^32 outputs"), outp.value));
}
}
if let Some(transaction_output_index) = htlc.transaction_output_index {
if transaction_output_index as usize >= tx.output.len() ||
tx.output[transaction_output_index as usize].value != htlc.amount_msat / 1000 {
- return (claimable_outpoints, (commitment_txid, watch_outputs)); // Corrupted per_commitment_data, fuck this user
+ // per_commitment_data is corrupt or our commitment signing key leaked!
+ return (claimable_outpoints, (commitment_txid, watch_outputs),
+ to_counterparty_output_info);
}
let revk_htlc_outp = RevokedHTLCOutput::build(per_commitment_point, self.counterparty_commitment_params.counterparty_delayed_payment_base_key, self.counterparty_commitment_params.counterparty_htlc_base_key, per_commitment_key, htlc.amount_msat / 1000, htlc.clone(), self.onchain_tx_handler.channel_transaction_parameters.opt_anchors.is_some());
let justice_package = PackageTemplate::build_package(commitment_txid, transaction_output_index, PackageSolvingData::RevokedHTLCOutput(revk_htlc_outp), htlc.cltv_expiry, true, height);
self.counterparty_commitment_txn_on_chain.insert(commitment_txid, commitment_number);
if let Some(per_commitment_data) = per_commitment_option {
- fail_unbroadcast_htlcs!(self, "revoked_counterparty", commitment_txid, height,
+ fail_unbroadcast_htlcs!(self, "revoked_counterparty", commitment_txid, tx, height,
per_commitment_data.iter().map(|(htlc, htlc_source)|
(htlc, htlc_source.as_ref().map(|htlc_source| htlc_source.as_ref()))
), logger);
} else {
debug_assert!(false, "We should have per-commitment option for any recognized old commitment txn");
- fail_unbroadcast_htlcs!(self, "revoked counterparty", commitment_txid, height,
+ fail_unbroadcast_htlcs!(self, "revoked counterparty", commitment_txid, tx, height,
[].iter().map(|reference| *reference), logger);
}
}
self.counterparty_commitment_txn_on_chain.insert(commitment_txid, commitment_number);
log_info!(logger, "Got broadcast of non-revoked counterparty commitment transaction {}", commitment_txid);
- fail_unbroadcast_htlcs!(self, "counterparty", commitment_txid, height,
+ fail_unbroadcast_htlcs!(self, "counterparty", commitment_txid, tx, height,
per_commitment_data.iter().map(|(htlc, htlc_source)|
(htlc, htlc_source.as_ref().map(|htlc_source| htlc_source.as_ref()))
), logger);
- let htlc_claim_reqs = self.get_counterparty_htlc_output_claim_reqs(commitment_number, commitment_txid, Some(tx));
+ let (htlc_claim_reqs, counterparty_output_info) =
+ self.get_counterparty_output_claim_info(commitment_number, commitment_txid, Some(tx));
+ to_counterparty_output_info = counterparty_output_info;
for req in htlc_claim_reqs {
claimable_outpoints.push(req);
}
}
- (claimable_outpoints, (commitment_txid, watch_outputs))
+ (claimable_outpoints, (commitment_txid, watch_outputs), to_counterparty_output_info)
}
- fn get_counterparty_htlc_output_claim_reqs(&self, commitment_number: u64, commitment_txid: Txid, tx: Option<&Transaction>) -> Vec<PackageTemplate> {
+ /// Returns the HTLC claim package templates and the counterparty output info
+ fn get_counterparty_output_claim_info(&self, commitment_number: u64, commitment_txid: Txid, tx: Option<&Transaction>)
+ -> (Vec<PackageTemplate>, CommitmentTxCounterpartyOutputInfo) {
let mut claimable_outpoints = Vec::new();
- if let Some(htlc_outputs) = self.counterparty_claimable_outpoints.get(&commitment_txid) {
- if let Some(per_commitment_points) = self.their_cur_per_commitment_points {
- let per_commitment_point_option =
- // If the counterparty commitment tx is the latest valid state, use their latest
- // per-commitment point
- if per_commitment_points.0 == commitment_number { Some(&per_commitment_points.1) }
- else if let Some(point) = per_commitment_points.2.as_ref() {
- // If counterparty commitment tx is the state previous to the latest valid state, use
- // their previous per-commitment point (non-atomicity of revocation means it's valid for
- // them to temporarily have two valid commitment txns from our viewpoint)
- if per_commitment_points.0 == commitment_number + 1 { Some(point) } else { None }
- } else { None };
- if let Some(per_commitment_point) = per_commitment_point_option {
- for (_, &(ref htlc, _)) in htlc_outputs.iter().enumerate() {
- if let Some(transaction_output_index) = htlc.transaction_output_index {
- if let Some(transaction) = tx {
- if transaction_output_index as usize >= transaction.output.len() ||
- transaction.output[transaction_output_index as usize].value != htlc.amount_msat / 1000 {
- return claimable_outpoints; // Corrupted per_commitment_data, fuck this user
- }
- }
- let preimage = if htlc.offered { if let Some(p) = self.payment_preimages.get(&htlc.payment_hash) { Some(*p) } else { None } } else { None };
- if preimage.is_some() || !htlc.offered {
- let counterparty_htlc_outp = if htlc.offered {
- PackageSolvingData::CounterpartyOfferedHTLCOutput(
- CounterpartyOfferedHTLCOutput::build(*per_commitment_point,
- self.counterparty_commitment_params.counterparty_delayed_payment_base_key,
- self.counterparty_commitment_params.counterparty_htlc_base_key,
- preimage.unwrap(), htlc.clone()))
- } else {
- PackageSolvingData::CounterpartyReceivedHTLCOutput(
- CounterpartyReceivedHTLCOutput::build(*per_commitment_point,
- self.counterparty_commitment_params.counterparty_delayed_payment_base_key,
- self.counterparty_commitment_params.counterparty_htlc_base_key,
- htlc.clone()))
- };
- let aggregation = if !htlc.offered { false } else { true };
- let counterparty_package = PackageTemplate::build_package(commitment_txid, transaction_output_index, counterparty_htlc_outp, htlc.cltv_expiry,aggregation, 0);
- claimable_outpoints.push(counterparty_package);
- }
- }
+ let mut to_counterparty_output_info: CommitmentTxCounterpartyOutputInfo = None;
+
+ let htlc_outputs = match self.counterparty_claimable_outpoints.get(&commitment_txid) {
+ Some(outputs) => outputs,
+ None => return (claimable_outpoints, to_counterparty_output_info),
+ };
+ let per_commitment_points = match self.their_cur_per_commitment_points {
+ Some(points) => points,
+ None => return (claimable_outpoints, to_counterparty_output_info),
+ };
+
+ let per_commitment_point =
+ // If the counterparty commitment tx is the latest valid state, use their latest
+ // per-commitment point
+ if per_commitment_points.0 == commitment_number { &per_commitment_points.1 }
+ else if let Some(point) = per_commitment_points.2.as_ref() {
+ // If counterparty commitment tx is the state previous to the latest valid state, use
+ // their previous per-commitment point (non-atomicity of revocation means it's valid for
+ // them to temporarily have two valid commitment txns from our viewpoint)
+ if per_commitment_points.0 == commitment_number + 1 {
+ point
+ } else { return (claimable_outpoints, to_counterparty_output_info); }
+ } else { return (claimable_outpoints, to_counterparty_output_info); };
+
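+ // Per BOLT #3, the broadcaster's to_self output is a P2WSH paying to:
+ //   OP_IF <revocation_pubkey>
+ //   OP_ELSE <to_self_delay> OP_CHECKSEQUENCEVERIFY OP_DROP <delayed_payment_pubkey>
+ //   OP_ENDIF OP_CHECKSIG
+ // Rebuilding that script from our revocation basepoint lets us locate the
+ // counterparty's output below by matching script_pubkeys.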
+ if let Some(transaction) = tx {
+ let revokeable_p2wsh_opt =
+ if let Ok(revocation_pubkey) = chan_utils::derive_public_revocation_key(
+ &self.secp_ctx, &per_commitment_point, &self.holder_revocation_basepoint)
+ {
+ if let Ok(delayed_key) = chan_utils::derive_public_key(&self.secp_ctx,
+ &per_commitment_point,
+ &self.counterparty_commitment_params.counterparty_delayed_payment_base_key)
+ {
+ Some(chan_utils::get_revokeable_redeemscript(&revocation_pubkey,
+ self.counterparty_commitment_params.on_counterparty_tx_csv,
+ &delayed_key).to_v0_p2wsh())
+ } else {
+ debug_assert!(false, "Failed to derive a delayed payment key for a commitment state we accepted");
+ None
+ }
+ } else {
+ debug_assert!(false, "Failed to derive a revocation pubkey key for a commitment state we accepted");
+ None
+ };
+ if let Some(revokeable_p2wsh) = revokeable_p2wsh_opt {
+ for (idx, outp) in transaction.output.iter().enumerate() {
+ if outp.script_pubkey == revokeable_p2wsh {
+ to_counterparty_output_info =
+ Some((idx.try_into().expect("Can't have > 2^32 outputs"), outp.value));
}
}
}
}
- claimable_outpoints
+
+ for (_, &(ref htlc, _)) in htlc_outputs.iter().enumerate() {
+ if let Some(transaction_output_index) = htlc.transaction_output_index {
+ if let Some(transaction) = tx {
+ if transaction_output_index as usize >= transaction.output.len() ||
+ transaction.output[transaction_output_index as usize].value != htlc.amount_msat / 1000 {
+ // per_commitment_data is corrupt or our commitment signing key leaked!
+ return (claimable_outpoints, to_counterparty_output_info);
+ }
+ }
+ let preimage = if htlc.offered { self.payment_preimages.get(&htlc.payment_hash).cloned() } else { None };
+ if preimage.is_some() || !htlc.offered {
+ let counterparty_htlc_outp = if htlc.offered {
+ PackageSolvingData::CounterpartyOfferedHTLCOutput(
+ CounterpartyOfferedHTLCOutput::build(*per_commitment_point,
+ self.counterparty_commitment_params.counterparty_delayed_payment_base_key,
+ self.counterparty_commitment_params.counterparty_htlc_base_key,
+ preimage.unwrap(), htlc.clone()))
+ } else {
+ PackageSolvingData::CounterpartyReceivedHTLCOutput(
+ CounterpartyReceivedHTLCOutput::build(*per_commitment_point,
+ self.counterparty_commitment_params.counterparty_delayed_payment_base_key,
+ self.counterparty_commitment_params.counterparty_htlc_base_key,
+ htlc.clone()))
+ };
+ let aggregation = htlc.offered;
+ let counterparty_package = PackageTemplate::build_package(commitment_txid, transaction_output_index, counterparty_htlc_outp, htlc.cltv_expiry, aggregation, 0);
+ claimable_outpoints.push(counterparty_package);
+ }
+ }
+ }
+
+ (claimable_outpoints, to_counterparty_output_info)
}
/// Attempts to claim a counterparty HTLC-Success/HTLC-Timeout's outputs using the revocation key
let res = self.get_broadcasted_holder_claims(&self.current_holder_commitment_tx, height);
let mut to_watch = self.get_broadcasted_holder_watch_outputs(&self.current_holder_commitment_tx, tx);
append_onchain_update!(res, to_watch);
- fail_unbroadcast_htlcs!(self, "latest holder", commitment_txid, height,
+ fail_unbroadcast_htlcs!(self, "latest holder", commitment_txid, tx, height,
self.current_holder_commitment_tx.htlc_outputs.iter()
.map(|(htlc, _, htlc_source)| (htlc, htlc_source.as_ref())), logger);
} else if let &Some(ref holder_tx) = &self.prev_holder_signed_commitment_tx {
let res = self.get_broadcasted_holder_claims(holder_tx, height);
let mut to_watch = self.get_broadcasted_holder_watch_outputs(holder_tx, tx);
append_onchain_update!(res, to_watch);
- fail_unbroadcast_htlcs!(self, "previous holder", commitment_txid, height,
+ fail_unbroadcast_htlcs!(self, "previous holder", commitment_txid, tx, height,
holder_tx.htlc_outputs.iter().map(|(htlc, _, htlc_source)| (htlc, htlc_source.as_ref())),
logger);
}
log_info!(logger, "Channel {} closed by funding output spend in txid {}.",
log_bytes!(self.funding_info.0.to_channel_id()), tx.txid());
self.funding_spend_seen = true;
- if (tx.input[0].sequence >> 8*3) as u8 == 0x80 && (tx.lock_time >> 8*3) as u8 == 0x20 {
- let (mut new_outpoints, new_outputs) = self.check_spend_counterparty_transaction(&tx, height, &logger);
+ let mut commitment_tx_to_counterparty_output = None;
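+ // Per BOLT #3, commitment transactions are identifiable by their top sequence byte
+ // (0x80) and top lock_time byte (0x20); the remaining 24 bits of each carry the
+ // obscured commitment number.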
+ if (tx.input[0].sequence.0 >> 8*3) as u8 == 0x80 && (tx.lock_time.0 >> 8*3) as u8 == 0x20 {
+ let (mut new_outpoints, new_outputs, counterparty_output_idx_sats) =
+ self.check_spend_counterparty_transaction(&tx, height, &logger);
+ commitment_tx_to_counterparty_output = counterparty_output_idx_sats;
if !new_outputs.1.is_empty() {
watch_outputs.push(new_outputs);
}
claimable_outpoints.append(&mut new_outpoints);
if new_outpoints.is_empty() {
if let Some((mut new_outpoints, new_outputs)) = self.check_spend_holder_transaction(&tx, height, &logger) {
+ debug_assert!(commitment_tx_to_counterparty_output.is_none(),
+ "A commitment transaction matched as both a counterparty and local commitment tx?");
if !new_outputs.1.is_empty() {
watch_outputs.push(new_outputs);
}
let txid = tx.txid();
self.onchain_events_awaiting_threshold_conf.push(OnchainEventEntry {
txid,
+ transaction: Some((*tx).clone()),
height: height,
event: OnchainEvent::FundingSpendConfirmation {
on_local_output_csv: balance_spendable_csv,
+ commitment_tx_to_counterparty_output,
},
});
} else {
htlc_value_satoshis,
}));
if let Some(idx) = commitment_tx_output_idx {
- self.htlcs_resolved_on_chain.push(IrrevocablyResolvedHTLC { commitment_tx_output_idx: idx, payment_preimage: None });
+ self.htlcs_resolved_on_chain.push(IrrevocablyResolvedHTLC {
+ commitment_tx_output_idx: idx, resolving_txid: Some(entry.txid),
+ payment_preimage: None,
+ });
}
},
OnchainEvent::MaturingOutput { descriptor } => {
});
},
OnchainEvent::HTLCSpendConfirmation { commitment_tx_output_idx, preimage, .. } => {
- self.htlcs_resolved_on_chain.push(IrrevocablyResolvedHTLC { commitment_tx_output_idx, payment_preimage: preimage });
+ self.htlcs_resolved_on_chain.push(IrrevocablyResolvedHTLC {
+ commitment_tx_output_idx, resolving_txid: Some(entry.txid),
+ payment_preimage: preimage,
+ });
},
- OnchainEvent::FundingSpendConfirmation { .. } => {
+ OnchainEvent::FundingSpendConfirmation { commitment_tx_to_counterparty_output, .. } => {
self.funding_spend_confirmed = Some(entry.txid);
+ self.confirmed_commitment_tx_counterparty_output = commitment_tx_to_counterparty_output;
},
}
}
log_error!(logger, "Input spending {} ({}:{}) in {} resolves {} HTLC with payment hash {} with {}!",
$tx_info, input.previous_output.txid, input.previous_output.vout, tx.txid(),
if outbound_htlc { "outbound" } else { "inbound" }, log_bytes!($htlc.payment_hash.0),
- if revocation_sig_claim { "revocation sig" } else { "preimage claim after we'd passed the HTLC resolution back" });
+ if revocation_sig_claim { "revocation sig" } else { "preimage claim after we'd passed the HTLC resolution back. We can likely claim the HTLC output with a revocation claim" });
} else {
log_info!(logger, "Input spending {} ({}:{}) in {} resolves {} HTLC with payment hash {} with {}",
$tx_info, input.previous_output.txid, input.previous_output.vout, tx.txid(),
if payment_data.is_none() {
log_claim!($tx_info, $holder_tx, htlc_output, false);
let outbound_htlc = $holder_tx == htlc_output.offered;
- if !outbound_htlc || revocation_sig_claim {
- self.onchain_events_awaiting_threshold_conf.push(OnchainEventEntry {
- txid: tx.txid(), height,
- event: OnchainEvent::HTLCSpendConfirmation {
- commitment_tx_output_idx: input.previous_output.vout,
- preimage: if accepted_preimage_claim || offered_preimage_claim {
- Some(payment_preimage) } else { None },
- // If this is a payment to us (!outbound_htlc, above),
- // wait for the CSV delay before dropping the HTLC from
- // claimable balance if the claim was an HTLC-Success
- // transaction.
- on_to_local_output_csv: if accepted_preimage_claim {
- Some(self.on_holder_tx_csv) } else { None },
- },
- });
- } else {
- // Outbound claims should always have payment_data, unless
- // we've already failed the HTLC as the commitment transaction
- // which was broadcasted was revoked. In that case, we should
- // spend the HTLC output here immediately, and expose that fact
- // as a Balance, something which we do not yet do.
- // TODO: Track the above as claimable!
- }
+ self.onchain_events_awaiting_threshold_conf.push(OnchainEventEntry {
+ txid: tx.txid(), height, transaction: Some(tx.clone()),
+ event: OnchainEvent::HTLCSpendConfirmation {
+ commitment_tx_output_idx: input.previous_output.vout,
+ preimage: if accepted_preimage_claim || offered_preimage_claim {
+ Some(payment_preimage) } else { None },
+ // If this is a payment to us (ie !outbound_htlc), wait for
+ // the CSV delay before dropping the HTLC from claimable
+ // balance if the claim was an HTLC-Success transaction (ie
+ // accepted_preimage_claim).
+ on_to_local_output_csv: if accepted_preimage_claim && !outbound_htlc {
+ Some(self.on_holder_tx_csv) } else { None },
+ },
+ });
continue 'outer_loop;
}
}
self.onchain_events_awaiting_threshold_conf.push(OnchainEventEntry {
txid: tx.txid(),
height,
+ transaction: Some(tx.clone()),
event: OnchainEvent::HTLCSpendConfirmation {
commitment_tx_output_idx: input.previous_output.vout,
preimage: Some(payment_preimage),
} else { false }) {
self.onchain_events_awaiting_threshold_conf.push(OnchainEventEntry {
txid: tx.txid(),
+ transaction: Some(tx.clone()),
height,
event: OnchainEvent::HTLCSpendConfirmation {
commitment_tx_output_idx: input.previous_output.vout,
});
let entry = OnchainEventEntry {
txid: tx.txid(),
+ transaction: Some(tx.clone()),
height,
event: OnchainEvent::HTLCUpdate {
source, payment_hash,
if let Some(spendable_output) = spendable_output {
let entry = OnchainEventEntry {
txid: tx.txid(),
+ transaction: Some(tx.clone()),
height: height,
event: OnchainEvent::MaturingOutput { descriptor: spendable_output.clone() },
};
let mut htlcs_resolved_on_chain = Some(Vec::new());
let mut funding_spend_seen = Some(false);
let mut counterparty_node_id = None;
+ let mut confirmed_commitment_tx_counterparty_output = None;
read_tlv_fields!(reader, {
(1, funding_spend_confirmed, option),
(3, htlcs_resolved_on_chain, vec_type),
(5, pending_monitor_events, vec_type),
(7, funding_spend_seen, option),
(9, counterparty_node_id, option),
+ (11, confirmed_commitment_tx_counterparty_output, option),
});
let mut secp_ctx = Secp256k1::new();
holder_tx_signed,
funding_spend_seen: funding_spend_seen.unwrap(),
funding_spend_confirmed,
+ confirmed_commitment_tx_counterparty_output,
htlcs_resolved_on_chain: htlcs_resolved_on_chain.unwrap(),
best_block,
use util::ser::{ReadableArgs, Writeable};
use sync::{Arc, Mutex};
use io;
- use bitcoin::Witness;
+ use bitcoin::{PackedLockTime, Sequence, TxMerkleNode, Witness};
use prelude::*;
fn do_test_funding_spend_refuses_updates(use_local_txn: bool) {
let new_header = BlockHeader {
version: 2, time: 0, bits: 0, nonce: 0,
prev_blockhash: nodes[0].best_block_info().0,
- merkle_root: Default::default() };
+ merkle_root: TxMerkleNode::all_zeros() };
let conf_height = nodes[0].best_block_info().1 + 1;
nodes[1].chain_monitor.chain_monitor.transactions_confirmed(&new_header,
&[(0, broadcast_tx)], conf_height);
let fee_estimator = TestFeeEstimator { sat_per_kw: Mutex::new(253) };
let dummy_key = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
- let dummy_tx = Transaction { version: 0, lock_time: 0, input: Vec::new(), output: Vec::new() };
+ let dummy_tx = Transaction { version: 0, lock_time: PackedLockTime::ZERO, input: Vec::new(), output: Vec::new() };
let mut preimages = Vec::new();
{
delayed_payment_basepoint: PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[47; 32]).unwrap()),
htlc_basepoint: PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[48; 32]).unwrap())
};
- let funding_outpoint = OutPoint { txid: Default::default(), index: u16::max_value() };
+ let funding_outpoint = OutPoint { txid: Txid::all_zeros(), index: u16::max_value() };
let channel_parameters = ChannelTransactionParameters {
holder_pubkeys: keys.holder_channel_pubkeys.clone(),
holder_selected_contest_delay: 66,
// Justice tx with 1 to_holder, 2 revoked offered HTLCs, 1 revoked received HTLCs
for &opt_anchors in [false, true].iter() {
- let mut claim_tx = Transaction { version: 0, lock_time: 0, input: Vec::new(), output: Vec::new() };
+ let mut claim_tx = Transaction { version: 0, lock_time: PackedLockTime::ZERO, input: Vec::new(), output: Vec::new() };
let mut sum_actual_sigs = 0;
for i in 0..4 {
claim_tx.input.push(TxIn {
vout: i,
},
script_sig: Script::new(),
- sequence: 0xfffffffd,
+ sequence: Sequence::ENABLE_RBF_NO_LOCKTIME,
witness: Witness::new(),
});
}
// Claim tx with 1 offered HTLCs, 3 received HTLCs
for &opt_anchors in [false, true].iter() {
- let mut claim_tx = Transaction { version: 0, lock_time: 0, input: Vec::new(), output: Vec::new() };
+ let mut claim_tx = Transaction { version: 0, lock_time: PackedLockTime::ZERO, input: Vec::new(), output: Vec::new() };
let mut sum_actual_sigs = 0;
for i in 0..4 {
claim_tx.input.push(TxIn {
vout: i,
},
script_sig: Script::new(),
- sequence: 0xfffffffd,
+ sequence: Sequence::ENABLE_RBF_NO_LOCKTIME,
witness: Witness::new(),
});
}
// Justice tx with 1 revoked HTLC-Success tx output
for &opt_anchors in [false, true].iter() {
- let mut claim_tx = Transaction { version: 0, lock_time: 0, input: Vec::new(), output: Vec::new() };
+ let mut claim_tx = Transaction { version: 0, lock_time: PackedLockTime::ZERO, input: Vec::new(), output: Vec::new() };
let mut sum_actual_sigs = 0;
claim_tx.input.push(TxIn {
previous_output: BitcoinOutPoint {
vout: 0,
},
script_sig: Script::new(),
- sequence: 0xfffffffd,
+ sequence: Sequence::ENABLE_RBF_NO_LOCKTIME,
witness: Witness::new(),
});
claim_tx.output.push(TxOut {
use bitcoin::hashes::sha256d::Hash as Sha256dHash;
use bitcoin::hash_types::WPubkeyHash;
-use bitcoin::secp256k1::{SecretKey, PublicKey};
+use bitcoin::secp256k1::{SecretKey, PublicKey, Scalar};
use bitcoin::secp256k1::{Secp256k1, ecdsa::Signature, Signing};
use bitcoin::secp256k1::ecdh::SharedSecret;
use bitcoin::secp256k1::ecdsa::RecoverableSignature;
-use bitcoin::{secp256k1, Witness};
+use bitcoin::{PackedLockTime, secp256k1, Sequence, Witness};
use util::{byte_utils, transaction_utils};
use util::crypto::{hkdf_extract_expand_twice, sign};
/// secret, though this is less efficient.
///
/// [`node secret`]: Self::get_node_secret
- fn ecdh(&self, recipient: Recipient, other_key: &PublicKey, tweak: Option<&[u8; 32]>) -> Result<SharedSecret, ()>;
+ fn ecdh(&self, recipient: Recipient, other_key: &PublicKey, tweak: Option<&Scalar>) -> Result<SharedSecret, ()>;
/// Get a script pubkey which we send funds to when claiming on-chain contestable outputs.
///
/// This method should return a different value each time it is called, to avoid linking
if spend_tx.input.len() <= input_idx { return Err(()); }
if !spend_tx.input[input_idx].script_sig.is_empty() { return Err(()); }
if spend_tx.input[input_idx].previous_output != descriptor.outpoint.into_bitcoin_outpoint() { return Err(()); }
- if spend_tx.input[input_idx].sequence != descriptor.to_self_delay as u32 { return Err(()); }
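+ // Spending a delayed payment output requires the input's nSequence to encode the
+ // CSV delay, or the OP_CHECKSEQUENCEVERIFY in the witness script will fail.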
+ if spend_tx.input[input_idx].sequence.0 != descriptor.to_self_delay as u32 { return Err(()); }
let delayed_payment_key = chan_utils::derive_private_key(&secp_ctx, &descriptor.per_commitment_point, &self.delayed_payment_base_key)
.expect("We constructed the payment_base_key, so we can only fail here if the RNG is busted.");
input.push(TxIn {
previous_output: descriptor.outpoint.into_bitcoin_outpoint(),
script_sig: Script::new(),
- sequence: 0,
+ sequence: Sequence::ZERO,
witness: Witness::new(),
});
witness_weight += StaticPaymentOutputDescriptor::MAX_WITNESS_LENGTH;
input.push(TxIn {
previous_output: descriptor.outpoint.into_bitcoin_outpoint(),
script_sig: Script::new(),
- sequence: descriptor.to_self_delay as u32,
+ sequence: Sequence(descriptor.to_self_delay as u32),
witness: Witness::new(),
});
witness_weight += DelayedPaymentOutputDescriptor::MAX_WITNESS_LENGTH;
input.push(TxIn {
previous_output: outpoint.into_bitcoin_outpoint(),
script_sig: Script::new(),
- sequence: 0,
+ sequence: Sequence::ZERO,
witness: Witness::new(),
});
witness_weight += 1 + 73 + 34;
}
let mut spend_tx = Transaction {
version: 2,
- lock_time: 0,
+ lock_time: PackedLockTime(0),
input,
output: outputs,
};
}
}
- fn ecdh(&self, recipient: Recipient, other_key: &PublicKey, tweak: Option<&[u8; 32]>) -> Result<SharedSecret, ()> {
+ fn ecdh(&self, recipient: Recipient, other_key: &PublicKey, tweak: Option<&Scalar>) -> Result<SharedSecret, ()> {
let mut node_secret = self.get_node_secret(recipient)?;
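+ // When a tweak is supplied, ECDH is performed against node_secret * tweak rather
+ // than the raw node secret; presumably this supports the blinded-path handling in
+ // this change, where our static key has been multiplied by a blinding factor.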
if let Some(tweak) = tweak {
- node_secret.mul_assign(tweak).map_err(|_| ())?;
+ node_secret = node_secret.mul_tweak(tweak).map_err(|_| ())?;
}
Ok(SharedSecret::new(other_key, &node_secret))
}
}
}
- fn ecdh(&self, recipient: Recipient, other_key: &PublicKey, tweak: Option<&[u8; 32]>) -> Result<SharedSecret, ()> {
+ fn ecdh(&self, recipient: Recipient, other_key: &PublicKey, tweak: Option<&Scalar>) -> Result<SharedSecret, ()> {
let mut node_secret = self.get_node_secret(recipient)?;
if let Some(tweak) = tweak {
- node_secret.mul_assign(tweak).map_err(|_| ())?;
+ node_secret = node_secret.mul_tweak(tweak).map_err(|_| ())?;
}
Ok(SharedSecret::new(other_key, &node_secret))
}
use bitcoin::blockdata::block::{Block, BlockHeader};
use bitcoin::blockdata::constants::genesis_block;
use bitcoin::blockdata::script::Script;
-use bitcoin::blockdata::transaction::{Transaction, TxOut};
+use bitcoin::blockdata::transaction::TxOut;
use bitcoin::hash_types::{BlockHash, Txid};
use bitcoin::network::constants::Network;
use bitcoin::secp256k1::PublicKey;
/// in the event of a chain reorganization, it must not be called with a `header` that is no
/// longer in the chain as of the last call to [`best_block_updated`].
///
- /// [chain order]: Confirm#Order
+ /// [chain order]: Confirm#order
/// [`best_block_updated`]: Self::best_block_updated
fn transactions_confirmed(&self, header: &BlockHeader, txdata: &TransactionData, height: u32);
/// Processes a transaction that is no longer confirmed as result of a chain reorganization.
///
/// Should be called for any transaction returned by [`get_relevant_txids`] if it has been
- /// reorganized out of the best chain. Once called, the given transaction should not be returned
- /// by [`get_relevant_txids`] unless it has been reconfirmed via [`transactions_confirmed`].
+ /// reorganized out of the best chain. Once called, the given transaction will not be returned
+ /// by [`get_relevant_txids`], unless it has been reconfirmed via [`transactions_confirmed`].
///
/// [`get_relevant_txids`]: Self::get_relevant_txids
/// [`transactions_confirmed`]: Self::transactions_confirmed
/// Returns transactions that should be monitored for reorganization out of the chain.
///
- /// Should include any transactions passed to [`transactions_confirmed`] that have insufficient
- /// confirmations to be safe from a chain reorganization. Should not include any transactions
- /// passed to [`transaction_unconfirmed`] unless later reconfirmed.
+ /// Will include any transactions passed to [`transactions_confirmed`] that have insufficient
+ /// confirmations to be safe from a chain reorganization. Will not include any transactions
+ /// passed to [`transaction_unconfirmed`], unless later reconfirmed.
///
/// May be called to determine the subset of transactions that must still be monitored for
/// reorganization. Will be idempotent between calls but may change as a result of calls to the
/// Registers interest in spends of a transaction output.
///
- /// Optionally, when `output.block_hash` is set, should return any transaction spending the
- /// output that is found in the corresponding block along with its index.
- ///
- /// This return value is useful for Electrum clients in order to supply in-block descendant
- /// transactions which otherwise were not included. This is not necessary for other clients if
- /// such descendant transactions were already included (e.g., when a BIP 157 client provides the
- /// full block).
- fn register_output(&self, output: WatchedOutput) -> Option<(usize, Transaction)>;
+ /// Note that this method might be called during processing of a new block. You therefore need
+ /// to ensure that spends of dependent outputs within an already-connected block are also
+ /// handled correctly, e.g., by re-scanning the block in question whenever new outputs have
+ /// been registered mid-processing.
+ fn register_output(&self, output: WatchedOutput);
}
/// A transaction output watched by a [`ChannelMonitor`] for spends on-chain.
///
/// Used to convey to a [`Filter`] such an output with a given spending condition. Any transaction
/// spending the output must be given to [`ChannelMonitor::block_connected`] either directly or via
-/// the return value of [`Filter::register_output`].
+/// [`Confirm::transactions_confirmed`].
///
/// If `block_hash` is `Some`, this indicates the output was created in the corresponding block and
/// may have been spent there. See [`Filter::register_output`] for details.
use core::cmp;
use core::ops::Deref;
use core::mem::replace;
+use bitcoin::hashes::Hash;
const MAX_ALLOC_SIZE: usize = 64*1024;
impl MaybeReadable for OnchainEventEntry {
fn read<R: io::Read>(reader: &mut R) -> Result<Option<Self>, DecodeError> {
- let mut txid = Default::default();
+ let mut txid = Txid::all_zeros();
let mut height = 0;
let mut event = None;
read_tlv_fields!(reader, {
if cached_request.is_malleable() {
let predicted_weight = cached_request.package_weight(&self.destination_script, self.channel_transaction_parameters.opt_anchors.is_some());
if let Some((output_value, new_feerate)) =
- cached_request.compute_package_output(predicted_weight, self.destination_script.dust_value().as_sat(), fee_estimator, logger) {
+ cached_request.compute_package_output(predicted_weight, self.destination_script.dust_value().to_sat(), fee_estimator, logger) {
assert!(new_feerate != 0);
let transaction = cached_request.finalize_package(self, output_value, self.destination_script.clone(), logger).unwrap();
}
}
+ pub(crate) fn is_output_spend_pending(&self, outpoint: &BitcoinOutPoint) -> bool {
+ self.claimable_outpoints.contains_key(outpoint)
+ }
+
pub(crate) fn get_relevant_txids(&self) -> Vec<Txid> {
let mut txids: Vec<Txid> = self.onchain_events_awaiting_threshold_conf
.iter()
use core::cmp;
use core::mem;
use core::ops::Deref;
-use bitcoin::Witness;
+use bitcoin::{PackedLockTime, Sequence, Witness};
use super::chaininterface::LowerBoundedFeeEstimator;
if let Ok(chan_keys) = TxCreationKeys::derive_new(&onchain_handler.secp_ctx, &outp.per_commitment_point, &outp.counterparty_delayed_payment_base_key, &outp.counterparty_htlc_base_key, &onchain_handler.signer.pubkeys().revocation_basepoint, &onchain_handler.signer.pubkeys().htlc_basepoint) {
let witness_script = chan_utils::get_htlc_redeemscript_with_explicit_keys(&outp.htlc, onchain_handler.opt_anchors(), &chan_keys.broadcaster_htlc_key, &chan_keys.countersignatory_htlc_key, &chan_keys.revocation_key);
- bumped_tx.lock_time = outp.htlc.cltv_expiry; // Right now we don't aggregate time-locked transaction, if we do we should set lock_time before to avoid breaking hash computation
+ bumped_tx.lock_time = PackedLockTime(outp.htlc.cltv_expiry); // We don't currently aggregate time-locked transactions; if we ever do, lock_time must be set before signing, as changing it afterwards would invalidate the signature hash.
if let Ok(sig) = onchain_handler.signer.sign_counterparty_htlc_transaction(&bumped_tx, i, &outp.htlc.amount_msat / 1000, &outp.per_commitment_point, &outp.htlc, &onchain_handler.secp_ctx) {
let mut ser_sig = sig.serialize_der().to_vec();
ser_sig.push(EcdsaSighashType::All as u8);
PackageMalleability::Malleable => {
let mut bumped_tx = Transaction {
version: 2,
- lock_time: 0,
+ lock_time: PackedLockTime::ZERO,
input: vec![],
output: vec![TxOut {
script_pubkey: destination_script,
bumped_tx.input.push(TxIn {
previous_output: *outpoint,
script_sig: Script::new(),
- sequence: 0xfffffffd,
+ sequence: Sequence::ENABLE_RBF_NO_LOCKTIME,
witness: Witness::new(),
});
}
#![cfg_attr(not(any(test, fuzzing, feature = "_test_utils")), deny(missing_docs))]
#![cfg_attr(not(any(test, fuzzing, feature = "_test_utils")), forbid(unsafe_code))]
+
+// Prefix these with `rustdoc::` when we update our MSRV to be >= 1.52 to remove warnings.
#![deny(broken_intra_doc_links)]
+#![deny(private_intra_doc_links)]
// In general, rust is absolutely horrid at supporting users doing things like,
// for example, compiling Rust code for real environments. Disable useless lints
pub mod chain;
pub mod ln;
pub mod routing;
-#[allow(unused)]
-mod onion_message; // To be exposed after sending/receiving OMs is supported in PeerManager.
+pub mod onion_message;
#[cfg(feature = "std")]
/// Re-export of either `core2::io` or `std::io`, depending on the `std` feature flag.
use util::{byte_utils, transaction_utils};
use bitcoin::hash_types::WPubkeyHash;
-use bitcoin::secp256k1::{SecretKey, PublicKey};
+use bitcoin::secp256k1::{SecretKey, PublicKey, Scalar};
use bitcoin::secp256k1::{Secp256k1, ecdsa::Signature, Message};
use bitcoin::secp256k1::Error as SecpError;
-use bitcoin::{secp256k1, Witness};
+use bitcoin::{PackedLockTime, secp256k1, Sequence, Witness};
use io;
use prelude::*;
ins.push(TxIn {
previous_output: funding_outpoint,
script_sig: Script::new(),
- sequence: 0xffffffff,
+ sequence: Sequence::MAX,
witness: Witness::new(),
});
ins
Transaction {
version: 2,
- lock_time: 0,
+ lock_time: PackedLockTime::ZERO,
input: txins,
output: outputs,
}
sha.input(&PublicKey::from_secret_key(&secp_ctx, &base_secret).serialize());
let res = Sha256::from_engine(sha).into_inner();
- let mut key = base_secret.clone();
- key.add_assign(&res)?;
- Ok(key)
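+ // Per BOLT #3, the derived secret is base_secret + SHA256(per_commitment_point ||
+ // base_point) (mod n); `res` above is that digest, now applied via `add_tweak`.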
+ base_secret.clone().add_tweak(&Scalar::from_be_bytes(res).unwrap())
}
/// Derives a per-commitment-transaction public key (eg an htlc key or a delayed_payment key)
Sha256::from_engine(sha).into_inner()
};
- let mut countersignatory_contrib = countersignatory_revocation_base_secret.clone();
- countersignatory_contrib.mul_assign(&rev_append_commit_hash_key)?;
- let mut broadcaster_contrib = per_commitment_secret.clone();
- broadcaster_contrib.mul_assign(&commit_append_rev_hash_key)?;
- countersignatory_contrib.add_assign(&broadcaster_contrib[..])?;
- Ok(countersignatory_contrib)
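+ // Per BOLT #3, the revocation secret combines both parties' contributions:
+ //   revocation_secret = revocation_base_secret * SHA256(revocation_basepoint || per_commitment_point)
+ //                     + per_commitment_secret * SHA256(per_commitment_point || revocation_basepoint)  (mod n)
+ // mapped below onto the new `mul_tweak`/`add_tweak` API.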
+ let countersignatory_contrib = countersignatory_revocation_base_secret.clone().mul_tweak(&Scalar::from_be_bytes(rev_append_commit_hash_key).unwrap())?;
+ let broadcaster_contrib = per_commitment_secret.clone().mul_tweak(&Scalar::from_be_bytes(commit_append_rev_hash_key).unwrap())?;
+ countersignatory_contrib.add_tweak(&Scalar::from_be_bytes(broadcaster_contrib.secret_bytes()).unwrap())
}
/// Derives a per-commitment-transaction revocation public key from its constituent parts. This is
Sha256::from_engine(sha).into_inner()
};
- let mut countersignatory_contrib = countersignatory_revocation_base_point.clone();
- countersignatory_contrib.mul_assign(&secp_ctx, &rev_append_commit_hash_key)?;
- let mut broadcaster_contrib = per_commitment_point.clone();
- broadcaster_contrib.mul_assign(&secp_ctx, &commit_append_rev_hash_key)?;
+ let countersignatory_contrib = countersignatory_revocation_base_point.clone().mul_tweak(&secp_ctx, &Scalar::from_be_bytes(rev_append_commit_hash_key).unwrap())?;
+ let broadcaster_contrib = per_commitment_point.clone().mul_tweak(&secp_ctx, &Scalar::from_be_bytes(commit_append_rev_hash_key).unwrap())?;
countersignatory_contrib.combine(&broadcaster_contrib)
}
vout: htlc.transaction_output_index.expect("Can't build an HTLC transaction for a dust output"),
},
script_sig: Script::new(),
- sequence: if opt_anchors { 1 } else { 0 },
+ sequence: Sequence(if opt_anchors { 1 } else { 0 }),
witness: Witness::new(),
});
Transaction {
version: 2,
- lock_time: if htlc.offered { htlc.cltv_expiry } else { 0 },
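+ // Per BOLT #3, HTLC-Timeout transactions (spending offered HTLCs) set nLockTime to
+ // the HTLC's cltv_expiry, while HTLC-Success transactions use a lock_time of 0.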
+ lock_time: PackedLockTime(if htlc.offered { htlc.cltv_expiry } else { 0 }),
input: txins,
output: txouts,
}
holder_selected_contest_delay: 0,
is_outbound_from_holder: false,
counterparty_parameters: Some(CounterpartyChannelTransactionParameters { pubkeys: channel_pubkeys.clone(), selected_contest_delay: 0 }),
- funding_outpoint: Some(chain::transaction::OutPoint { txid: Default::default(), index: 0 }),
+ funding_outpoint: Some(chain::transaction::OutPoint { txid: Txid::all_zeros(), index: 0 }),
opt_anchors: None
};
let mut htlcs_with_aux: Vec<(_, ())> = Vec::new();
fn make_transaction(obscured_commitment_transaction_number: u64, txins: Vec<TxIn>, outputs: Vec<TxOut>) -> Transaction {
Transaction {
version: 2,
- lock_time: ((0x20 as u32) << 8 * 3) | ((obscured_commitment_transaction_number & 0xffffffu64) as u32),
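+ // Commitment transactions put 0x20 in the top byte of nLockTime, with the lower 24
+ // bits carrying the obscured commitment number (see BOLT #3).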
+ lock_time: PackedLockTime(((0x20 as u32) << 8 * 3) | ((obscured_commitment_transaction_number & 0xffffffu64) as u32)),
input: txins,
output: outputs,
}
ins.push(TxIn {
previous_output: channel_parameters.funding_outpoint(),
script_sig: Script::new(),
- sequence: ((0x80 as u32) << 8 * 3)
- | ((obscured_commitment_transaction_number >> 3 * 8) as u32),
+ sequence: Sequence(((0x80 as u32) << 8 * 3)
+ | ((obscured_commitment_transaction_number >> 3 * 8) as u32)),
witness: Witness::new(),
});
ins
use bitcoin::secp256k1::{PublicKey, SecretKey, Secp256k1};
use util::test_utils;
use chain::keysinterface::{KeysInterface, BaseSign};
- use bitcoin::Network;
+ use bitcoin::{Network, Txid};
+ use bitcoin::hashes::Hash;
use ln::PaymentHash;
use bitcoin::hashes::hex::ToHex;
holder_selected_contest_delay: 0,
is_outbound_from_holder: false,
counterparty_parameters: Some(CounterpartyChannelTransactionParameters { pubkeys: counterparty_pubkeys.clone(), selected_contest_delay: 0 }),
- funding_outpoint: Some(chain::transaction::OutPoint { txid: Default::default(), index: 0 }),
+ funding_outpoint: Some(chain::transaction::OutPoint { txid: Txid::all_zeros(), index: 0 }),
opt_anchors: None
};
use util::test_utils;
use io;
+use bitcoin::hashes::Hash;
+use bitcoin::TxMerkleNode;
use prelude::*;
use sync::{Arc, Mutex};
assert!(chain_mon.watch_channel(outpoint, new_monitor).is_ok());
chain_mon
};
- let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+ let header = BlockHeader {
+ version: 0x20000000,
+ prev_blockhash: BlockHash::all_zeros(),
+ merkle_root: TxMerkleNode::all_zeros(),
+ time: 42,
+ bits: 42,
+ nonce: 42
+ };
chain_mon.chain_monitor.block_connected(&Block { header, txdata: vec![] }, 200);
// Set the persister's return value to be a TemporaryFailure.
use util::errors::APIError;
use util::test_utils;
use util::test_utils::OnGetShutdownScriptpubkey;
- use bitcoin::secp256k1::{Secp256k1, ecdsa::Signature};
+ use bitcoin::secp256k1::{Secp256k1, ecdsa::Signature, Scalar};
use bitcoin::secp256k1::ffi::Signature as FFISignature;
use bitcoin::secp256k1::{SecretKey,PublicKey};
use bitcoin::secp256k1::ecdh::SharedSecret;
use bitcoin::hashes::Hash;
use bitcoin::hash_types::WPubkeyHash;
use bitcoin::bech32::u5;
+ use bitcoin::PackedLockTime;
use bitcoin::util::address::WitnessVersion;
use prelude::*;
type Signer = InMemorySigner;
fn get_node_secret(&self, _recipient: Recipient) -> Result<SecretKey, ()> { panic!(); }
- fn ecdh(&self, _recipient: Recipient, _other_key: &PublicKey, _tweak: Option<&[u8; 32]>) -> Result<SharedSecret, ()> { panic!(); }
+ fn ecdh(&self, _recipient: Recipient, _other_key: &PublicKey, _tweak: Option<&Scalar>) -> Result<SharedSecret, ()> { panic!(); }
fn get_inbound_payment_key_material(&self) -> KeyMaterial { panic!(); }
fn get_destination_script(&self) -> Script {
let secp_ctx = Secp256k1::signing_only();
// Node A --> Node B: funding created
let output_script = node_a_chan.get_funding_redeemscript();
- let tx = Transaction { version: 1, lock_time: 0, input: Vec::new(), output: vec![TxOut {
+ let tx = Transaction { version: 1, lock_time: PackedLockTime::ZERO, input: Vec::new(), output: vec![TxOut {
value: 10000000, script_pubkey: output_script.clone(),
}]};
let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
use bitcoin::secp256k1::{SecretKey,PublicKey};
use bitcoin::secp256k1::Secp256k1;
use bitcoin::secp256k1::ecdh::SharedSecret;
-use bitcoin::secp256k1;
+use bitcoin::{LockTime, secp256k1, Sequence};
use chain;
use chain::{Confirm, ChannelMonitorUpdateErr, Watch, BestBlock};
// constituting our Lightning node might not have perfect sync about their blockchain views. Thus, if
// the wallet module is ahead of LDK's view, allow one more block of headroom.
// TODO: update if/when https://github.com/rust-bitcoin/rust-bitcoin/pull/994 lands and rust-bitcoin is bumped.
- if !funding_transaction.input.iter().all(|input| input.sequence == 0xffffffff) && funding_transaction.lock_time < 500_000_000 && funding_transaction.lock_time > height + 2 {
+ if !funding_transaction.input.iter().all(|input| input.sequence == Sequence::MAX) && LockTime::from(funding_transaction.lock_time).is_block_height() && funding_transaction.lock_time.0 > height + 2 {
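+ // (By consensus, lock_time values below 500_000_000 are block heights and larger
+ // values are UNIX timestamps; `is_block_height()` encodes exactly that split.)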
return Err(APIError::APIMisuseError {
err: "Funding transaction absolute timelock is non-final".to_owned()
});
if were_node_one == msg_from_node_one {
return Ok(NotifyOption::SkipPersist);
} else {
+ log_debug!(self.logger, "Received channel_update for channel {}.", log_bytes!(chan_id));
try_chan_entry!(self, chan.get_mut().channel_update(&msg), channel_state, chan);
}
},
use bitcoin::hashes::Hash;
use bitcoin::hashes::sha256::Hash as Sha256;
- use bitcoin::{Block, BlockHeader, Transaction, TxOut};
+ use bitcoin::{Block, BlockHeader, PackedLockTime, Transaction, TxMerkleNode, TxOut};
use sync::{Arc, Mutex};
let tx;
if let Event::FundingGenerationReady { temporary_channel_id, output_script, .. } = get_event!(node_a_holder, Event::FundingGenerationReady) {
- tx = Transaction { version: 2, lock_time: 0, input: Vec::new(), output: vec![TxOut {
+ tx = Transaction { version: 2, lock_time: PackedLockTime::ZERO, input: Vec::new(), output: vec![TxOut {
value: 8_000_000, script_pubkey: output_script,
}]};
node_a.funding_transaction_generated(&temporary_channel_id, &node_b.get_our_node_id(), tx.clone()).unwrap();
assert_eq!(&tx_broadcaster.txn_broadcasted.lock().unwrap()[..], &[tx.clone()]);
let block = Block {
- header: BlockHeader { version: 0x20000000, prev_blockhash: genesis_hash, merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 },
+ header: BlockHeader { version: 0x20000000, prev_blockhash: genesis_hash, merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 },
txdata: vec![tx],
};
Listen::block_connected(&node_a, &block, 1);
define_feature!(27, ShutdownAnySegwit, [InitContext, NodeContext],
"Feature flags for `opt_shutdown_anysegwit`.", set_shutdown_any_segwit_optional,
set_shutdown_any_segwit_required, supports_shutdown_anysegwit, requires_shutdown_anysegwit);
+ // We do not yet advertise the onion messages feature bit, but we need to detect when peers
+ // support it.
+ define_feature!(39, OnionMessages, [InitContext, NodeContext],
+ "Feature flags for `option_onion_messages`.", set_onion_messages_optional,
+ set_onion_messages_required, supports_onion_messages, requires_onion_messages);
define_feature!(45, ChannelType, [InitContext, NodeContext],
"Feature flags for `option_channel_type`.", set_channel_type_optional,
set_channel_type_required, supports_channel_type, requires_channel_type);
impl<T: sealed::InitialRoutingSync> Features<T> {
// We are no longer setting initial_routing_sync now that gossip_queries
- // is enabled. This feature is ignored by a peer when gossip_queries has
+ // is enabled. This feature is ignored by a peer when gossip_queries has
// been negotiated.
#[cfg(test)]
pub(crate) fn clear_initial_routing_sync(&mut self) {
use sync::{Arc, Mutex};
use core::mem;
use core::iter::repeat;
+use bitcoin::{PackedLockTime, TxMerkleNode};
pub const CHAN_CONFIRM_DEPTH: u32 = 10;
connect_blocks(node, conf_height - first_connect_height);
}
let mut block = Block {
- header: BlockHeader { version: 0x20000000, prev_blockhash: node.best_block_hash(), merkle_root: Default::default(), time: conf_height, bits: 42, nonce: 42 },
+ header: BlockHeader { version: 0x20000000, prev_blockhash: node.best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: conf_height, bits: 42, nonce: 42 },
txdata: Vec::new(),
};
for _ in 0..*node.network_chan_count.borrow() { // Make sure we don't end up with channels at the same short id by offsetting by chan_count
- block.txdata.push(Transaction { version: 0, lock_time: 0, input: Vec::new(), output: Vec::new() });
+ block.txdata.push(Transaction { version: 0, lock_time: PackedLockTime::ZERO, input: Vec::new(), output: Vec::new() });
}
block.txdata.push(tx.clone());
connect_block(node, &block);
let height = node.best_block_info().1 + 1;
let mut block = Block {
- header: BlockHeader { version: 0x2000000, prev_blockhash: node.best_block_hash(), merkle_root: Default::default(), time: height, bits: 42, nonce: 42 },
+ header: BlockHeader { version: 0x2000000, prev_blockhash: node.best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: height, bits: 42, nonce: 42 },
txdata: vec![],
};
assert!(depth >= 1);
let prev_blockhash = block.header.block_hash();
do_connect_block(node, block, skip_intermediaries);
block = Block {
- header: BlockHeader { version: 0x20000000, prev_blockhash, merkle_root: Default::default(), time: height + i, bits: 42, nonce: 42 },
+ header: BlockHeader { version: 0x20000000, prev_blockhash, merkle_root: TxMerkleNode::all_zeros(), time: height + i, bits: 42, nonce: 42 },
txdata: vec![],
};
}
);
let mut chan_progress = 0;
loop {
- let orig_announcements = self.gossip_sync.get_next_channel_announcements(chan_progress, 255);
- let deserialized_announcements = gossip_sync.get_next_channel_announcements(chan_progress, 255);
+ let orig_announcements = self.gossip_sync.get_next_channel_announcement(chan_progress);
+ let deserialized_announcements = gossip_sync.get_next_channel_announcement(chan_progress);
assert!(orig_announcements == deserialized_announcements);
- chan_progress = match orig_announcements.last() {
+ chan_progress = match orig_announcements {
Some(announcement) => announcement.0.contents.short_channel_id + 1,
None => break,
};
}
let mut node_progress = None;
loop {
- let orig_announcements = self.gossip_sync.get_next_node_announcements(node_progress.as_ref(), 255);
- let deserialized_announcements = gossip_sync.get_next_node_announcements(node_progress.as_ref(), 255);
+ let orig_announcements = self.gossip_sync.get_next_node_announcement(node_progress.as_ref());
+ let deserialized_announcements = gossip_sync.get_next_node_announcement(node_progress.as_ref());
assert!(orig_announcements == deserialized_announcements);
- node_progress = match orig_announcements.last() {
+ node_progress = match orig_announcements {
Some(announcement) => Some(announcement.contents.node_id),
None => break,
};
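The gossip queries now hand back at most one announcement per call instead of a batch, so callers drive the iteration themselves. A hedged sketch of the new loop shape (`count_channel_announcements` is illustrative; the trait method signature follows the calls above):

```rust
use lightning::ln::msgs::RoutingMessageHandler;

// Walk all channel announcements, one call per announcement.
fn count_channel_announcements<H: RoutingMessageHandler>(handler: &H) -> usize {
    let mut scid = 0u64;
    let mut count = 0;
    while let Some((ann, _update_a, _update_b)) = handler.get_next_channel_announcement(scid) {
        count += 1;
        // Resume one past the short channel id we were just handed.
        scid = ann.contents.short_channel_id + 1;
    }
    count
}
```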
assert_eq!(*channel_value_satoshis, expected_chan_value);
assert_eq!(user_channel_id, expected_user_chan_id);
- let tx = Transaction { version: chan_id as i32, lock_time: 0, input: Vec::new(), output: vec![TxOut {
+ let tx = Transaction { version: chan_id as i32, lock_time: PackedLockTime::ZERO, input: Vec::new(), output: vec![TxOut {
value: *channel_value_satoshis, script_pubkey: output_script.clone(),
}]};
let funding_outpoint = OutPoint { txid: tx.txid(), index: 0 };
{
$(
for outp in $spends_txn.output.iter() {
- assert!(outp.value >= outp.script_pubkey.dust_value().as_sat(), "Input tx output didn't meet dust limit");
+ assert!(outp.value >= outp.script_pubkey.dust_value().to_sat(), "Input tx output didn't meet dust limit");
}
)*
for outp in $tx.output.iter() {
- assert!(outp.value >= outp.script_pubkey.dust_value().as_sat(), "Spending tx output didn't meet dust limit");
+ assert!(outp.value >= outp.script_pubkey.dust_value().to_sat(), "Spending tx output didn't meet dust limit");
}
let get_output = |out_point: &bitcoin::blockdata::transaction::OutPoint| {
$(
};
}
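The `as_sat` to `to_sat` rename above follows rust-bitcoin 0.29's `Amount` API; a minimal sketch of the renamed accessor (values are illustrative):

```rust
use bitcoin::Amount;

fn main() {
    // Script::dust_value() returns an Amount; 0.29 renamed as_sat() to to_sat().
    let dust = Amount::from_sat(330);
    assert_eq!(dust.to_sat(), 330);
}
```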
-pub fn expect_payment_failed_conditions<'a, 'b, 'c, 'd, 'e>(
- node: &'a Node<'b, 'c, 'd>, expected_payment_hash: PaymentHash, expected_rejected_by_dest: bool,
- conditions: PaymentFailedConditions<'e>
+pub fn expect_payment_failed_conditions_event<'a, 'b, 'c, 'd, 'e>(
+ node: &'a Node<'b, 'c, 'd>, payment_failed_event: Event, expected_payment_hash: PaymentHash,
+ expected_rejected_by_dest: bool, conditions: PaymentFailedConditions<'e>
) {
- let mut events = node.node.get_and_clear_pending_events();
- assert_eq!(events.len(), 1);
- let expected_payment_id = match events.pop().unwrap() {
+ let expected_payment_id = match payment_failed_event {
Event::PaymentPathFailed { payment_hash, rejected_by_dest, path, retry, payment_id, network_update, short_channel_id,
#[cfg(test)]
error_code,
}
}
+pub fn expect_payment_failed_conditions<'a, 'b, 'c, 'd, 'e>(
+ node: &'a Node<'b, 'c, 'd>, expected_payment_hash: PaymentHash, expected_rejected_by_dest: bool,
+ conditions: PaymentFailedConditions<'e>
+) {
+ let mut events = node.node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 1);
+ expect_payment_failed_conditions_event(node, events.pop().unwrap(), expected_payment_hash, expected_rejected_by_dest, conditions);
+}
+
pub fn send_along_route_with_secret<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, route: Route, expected_paths: &[&[&Node<'a, 'b, 'c>]], recv_value: u64, our_payment_hash: PaymentHash, our_payment_secret: PaymentSecret) -> PaymentId {
let payment_id = origin_node.node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)).unwrap();
check_added_monitors!(origin_node, expected_paths.len());
if tx.input.len() == 1 && tx.input[0].previous_output.txid == res[0].txid() {
check_spends!(tx, res[0]);
if has_htlc_tx == HTLCType::TIMEOUT {
- assert!(tx.lock_time != 0);
+ assert!(tx.lock_time.0 != 0);
} else {
- assert!(tx.lock_time == 0);
+ assert!(tx.lock_time.0 == 0);
}
res.push(tx.clone());
false
use bitcoin::blockdata::opcodes;
use bitcoin::blockdata::constants::genesis_block;
use bitcoin::network::constants::Network;
-use bitcoin::{Transaction, TxIn, TxOut, Witness};
+use bitcoin::{PackedLockTime, Sequence, Transaction, TxIn, TxMerkleNode, TxOut, Witness};
use bitcoin::OutPoint as BitcoinOutPoint;
use bitcoin::secp256k1::Secp256k1;
use alloc::collections::BTreeSet;
use core::default::Default;
use core::iter::repeat;
+use bitcoin::hashes::Hash;
use sync::{Arc, Mutex};
use ln::functional_test_utils::*;
if steps & 0b1000_0000 != 0 {
let block = Block {
- header: BlockHeader { version: 0x20000000, prev_blockhash: nodes[0].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 },
+ header: BlockHeader { version: 0x20000000, prev_blockhash: nodes[0].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 },
txdata: vec![],
};
connect_block(&nodes[0], &block);
assert_eq!(node_txn[1].input[0].witness.clone().last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
assert!(node_txn[0].output[0].script_pubkey.is_v0_p2wsh()); // revokeable output
assert!(node_txn[1].output[0].script_pubkey.is_v0_p2wsh()); // revokeable output
- assert_eq!(node_txn[0].lock_time, 0);
- assert_eq!(node_txn[1].lock_time, 0);
+ assert_eq!(node_txn[0].lock_time.0, 0);
+ assert_eq!(node_txn[1].lock_time.0, 0);
// Verify that B's ChannelManager is able to extract preimage from HTLC Success tx and pass it backward
- let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42};
+ let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42};
connect_block(&nodes[1], &Block { header, txdata: node_txn});
connect_blocks(&nodes[1], TEST_FINAL_CLTV - 1); // Confirm blocks until the HTLC expires
{
// Node[0]: ChannelManager: 3 (commitment tx, 2*HTLC-Timeout tx), ChannelMonitor: 2 HTLC-timeout
check_spends!(node_txn[1], $commitment_tx);
check_spends!(node_txn[2], $commitment_tx);
- assert_ne!(node_txn[1].lock_time, 0);
- assert_ne!(node_txn[2].lock_time, 0);
+ assert_ne!(node_txn[1].lock_time.0, 0);
+ assert_ne!(node_txn[2].lock_time.0, 0);
if $htlc_offered {
assert_eq!(node_txn[1].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
assert_eq!(node_txn[2].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
assert_eq!(commitment_spend.input.len(), 2);
assert_eq!(commitment_spend.input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
assert_eq!(commitment_spend.input[1].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
- assert_eq!(commitment_spend.lock_time, 0);
+ assert_eq!(commitment_spend.lock_time.0, 0);
assert!(commitment_spend.output[0].script_pubkey.is_v0_p2wpkh()); // direct payment
check_spends!(node_txn[3], chan_1.3);
assert_eq!(node_txn[3].input[0].witness.clone().last().unwrap().len(), 71);
// we already checked the same situation with A.
// Verify that A's ChannelManager is able to extract preimage from preimage tx and generate PaymentSent
- let mut header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[0].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42};
+ let mut header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[0].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42};
connect_block(&nodes[0], &Block { header, txdata: vec![node_a_commitment_tx[0].clone(), commitment_spend.clone()] });
connect_blocks(&nodes[0], TEST_FINAL_CLTV + MIN_CLTV_EXPIRY_DELTA as u32 - 1); // Confirm blocks until the HTLC expires
check_closed_broadcast!(nodes[0], true);
assert_eq!(node_txn.len(), 3);
assert_eq!(node_txn[0], node_txn[1]);
- let mut header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+ let mut header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
connect_block(&nodes[1], &Block { header, txdata: vec![node_txn[0].clone(), node_txn[1].clone()]});
check_closed_broadcast!(nodes[1], true);
check_added_monitors!(nodes[1], 1);
assert_eq!(node_txn.len(), 1);
assert_eq!(node_txn[0].input.len(), 1);
assert_eq!(node_txn[0].input[0].previous_output.txid, tx.txid());
- assert_eq!(node_txn[0].lock_time, 0); // Must be an HTLC-Success
+ assert_eq!(node_txn[0].lock_time.0, 0); // Must be an HTLC-Success
assert_eq!(node_txn[0].input[0].witness.len(), 5); // Must be an HTLC-Success
check_spends!(node_txn[0], tx);
};
let mut block = Block {
- header: BlockHeader { version: 0x20000000, prev_blockhash: nodes[0].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 },
+ header: BlockHeader { version: 0x20000000, prev_blockhash: nodes[0].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 },
txdata: vec![],
};
connect_block(&nodes[0], &block);
assert_eq!(spend_txn.len(), 1);
assert_eq!(spend_txn[0].input.len(), 1);
check_spends!(spend_txn[0], node_txn[0]);
- assert_eq!(spend_txn[0].input[0].sequence, BREAKDOWN_TIMEOUT as u32);
+ assert_eq!(spend_txn[0].input[0].sequence.0, BREAKDOWN_TIMEOUT as u32);
}
#[test]
assert_eq!(revoked_htlc_txn[1].input.len(), 1);
assert_eq!(revoked_htlc_txn[1].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
check_spends!(revoked_htlc_txn[1], revoked_local_txn[0]);
- assert_ne!(revoked_htlc_txn[1].lock_time, 0); // HTLC-Timeout
+ assert_ne!(revoked_htlc_txn[1].lock_time.0, 0); // HTLC-Timeout
// B will generate justice tx from A's revoked commitment/HTLC tx
- let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+ let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
connect_block(&nodes[1], &Block { header, txdata: vec![revoked_local_txn[0].clone(), revoked_htlc_txn[1].clone()] });
check_closed_broadcast!(nodes[1], true);
check_added_monitors!(nodes[1], 1);
assert_eq!(revoked_local_txn[0].output[unspent_local_txn_output].script_pubkey.len(), 2 + 20); // P2WPKH
// A will generate justice tx from B's revoked commitment/HTLC tx
- let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[0].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+ let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[0].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
connect_block(&nodes[0], &Block { header, txdata: vec![revoked_local_txn[0].clone(), revoked_htlc_txn[0].clone()] });
check_closed_broadcast!(nodes[0], true);
check_added_monitors!(nodes[0], 1);
assert_eq!(c_txn[1].input[0].witness.clone().last().unwrap().len(), 71);
assert_eq!(c_txn[2].input[0].witness.clone().last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
assert!(c_txn[0].output[0].script_pubkey.is_v0_p2wsh()); // revokeable output
- assert_eq!(c_txn[0].lock_time, 0); // Success tx
+ assert_eq!(c_txn[0].lock_time.0, 0); // Success tx
// So we broadcast C's commitment tx and HTLC-Success on B's chain; we should successfully be able to extract the preimage and update the downstream monitor
- let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42};
+ let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42};
connect_block(&nodes[1], &Block { header, txdata: vec![c_txn[1].clone(), c_txn[2].clone()]});
check_added_monitors!(nodes[1], 1);
let events = nodes[1].node.get_and_clear_pending_events();
check_spends!(b_txn[0], commitment_tx[0]);
assert_eq!(b_txn[0].input[0].witness.clone().last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
assert!(b_txn[0].output[0].script_pubkey.is_v0_p2wpkh()); // direct payment
- assert_eq!(b_txn[0].lock_time, 0); // Success tx
+ assert_eq!(b_txn[0].lock_time.0, 0); // Success tx
check_closed_broadcast!(nodes[1], true);
check_added_monitors!(nodes[1], 1);
assert_eq!(spend_txn.len(), 1);
assert_eq!(spend_txn[0].input.len(), 1);
check_spends!(spend_txn[0], node_tx);
- assert_eq!(spend_txn[0].input[0].sequence, BREAKDOWN_TIMEOUT as u32);
+ assert_eq!(spend_txn[0].input[0].sequence.0, BREAKDOWN_TIMEOUT as u32);
}
fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, announce_latest: bool) {
check_spends!(spend_txn[0], local_txn[0]);
assert_eq!(spend_txn[1].input.len(), 1);
check_spends!(spend_txn[1], htlc_timeout);
- assert_eq!(spend_txn[1].input[0].sequence, BREAKDOWN_TIMEOUT as u32);
+ assert_eq!(spend_txn[1].input[0].sequence.0, BREAKDOWN_TIMEOUT as u32);
assert_eq!(spend_txn[2].input.len(), 2);
check_spends!(spend_txn[2], local_txn[0], htlc_timeout);
- assert!(spend_txn[2].input[0].sequence == BREAKDOWN_TIMEOUT as u32 ||
- spend_txn[2].input[1].sequence == BREAKDOWN_TIMEOUT as u32);
+ assert!(spend_txn[2].input[0].sequence.0 == BREAKDOWN_TIMEOUT as u32 ||
+ spend_txn[2].input[1].sequence.0 == BREAKDOWN_TIMEOUT as u32);
}
#[test]
check_spends!(spend_txn[0], local_txn_1[0]);
assert_eq!(spend_txn[1].input.len(), 1);
check_spends!(spend_txn[1], htlc_timeout);
- assert_eq!(spend_txn[1].input[0].sequence, BREAKDOWN_TIMEOUT as u32);
+ assert_eq!(spend_txn[1].input[0].sequence.0, BREAKDOWN_TIMEOUT as u32);
assert_eq!(spend_txn[2].input.len(), 2);
check_spends!(spend_txn[2], local_txn_1[0], htlc_timeout);
- assert!(spend_txn[2].input[0].sequence == BREAKDOWN_TIMEOUT as u32 ||
- spend_txn[2].input[1].sequence == BREAKDOWN_TIMEOUT as u32);
+ assert!(spend_txn[2].input[0].sequence.0 == BREAKDOWN_TIMEOUT as u32 ||
+ spend_txn[2].input[1].sequence.0 == BREAKDOWN_TIMEOUT as u32);
}
#[test]
let starting_block = nodes[1].best_block_info();
let mut block = Block {
- header: BlockHeader { version: 0x20000000, prev_blockhash: starting_block.0, merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 },
+ header: BlockHeader { version: 0x20000000, prev_blockhash: starting_block.0, merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 },
txdata: vec![],
};
for _ in starting_block.1 + 1..TEST_FINAL_CLTV - CLTV_CLAIM_BUFFER + starting_block.1 + 2 {
// to "time out" the HTLC.
let starting_block = nodes[1].best_block_info();
- let mut header = BlockHeader { version: 0x20000000, prev_blockhash: starting_block.0, merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+ let mut header = BlockHeader { version: 0x20000000, prev_blockhash: starting_block.0, merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
for _ in starting_block.1 + 1..TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + starting_block.1 + 2 {
connect_block(&nodes[0], &Block { header, txdata: Vec::new()});
let starting_block = nodes[1].best_block_info();
let mut block = Block {
- header: BlockHeader { version: 0x20000000, prev_blockhash: starting_block.0, merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 },
+ header: BlockHeader { version: 0x20000000, prev_blockhash: starting_block.0, merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 },
txdata: vec![],
};
for _ in starting_block.1 + 1..TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + CHAN_CONFIRM_DEPTH + 2 {
if !revoked {
assert_eq!(timeout_tx[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
} else {
- assert_eq!(timeout_tx[0].lock_time, 0);
+ assert_eq!(timeout_tx[0].lock_time.0, 0);
}
// We fail non-dust-HTLC 2 by broadcast of local timeout/revocation-claim tx
mine_transaction(&nodes[0], &timeout_tx[0]);
// Actually revoke tx by claiming an HTLC
claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage);
- let header = BlockHeader { version: 0x20000000, prev_blockhash: header_114, merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+ let header = BlockHeader { version: 0x20000000, prev_blockhash: header_114, merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
connect_block(&nodes[1], &Block { header, txdata: vec![revoked_txn[0].clone()] });
check_added_monitors!(nodes[1], 1);
// Revoke local commitment tx
claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage);
- let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+ let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
// B will generate both revoked HTLC-timeout/HTLC-preimage txn from revoked commitment tx
connect_block(&nodes[1], &Block { header, txdata: vec![revoked_local_txn[0].clone()] });
check_closed_broadcast!(nodes[1], true);
// Broadcast set of revoked txn on A
let hash_128 = connect_blocks(&nodes[0], 40);
- let header_11 = BlockHeader { version: 0x20000000, prev_blockhash: hash_128, merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+ let header_11 = BlockHeader { version: 0x20000000, prev_blockhash: hash_128, merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
connect_block(&nodes[0], &Block { header: header_11, txdata: vec![revoked_local_txn[0].clone()] });
- let header_129 = BlockHeader { version: 0x20000000, prev_blockhash: header_11.block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+ let header_129 = BlockHeader { version: 0x20000000, prev_blockhash: header_11.block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
connect_block(&nodes[0], &Block { header: header_129, txdata: vec![revoked_htlc_txn[0].clone(), revoked_htlc_txn[2].clone()] });
let events = nodes[0].node.get_and_clear_pending_events();
expect_pending_htlcs_forwardable_from_events!(nodes[0], events[0..1], true);
}
// Connect one more block to see if bumped penalty txn are issued for HTLC txn
- let header_130 = BlockHeader { version: 0x20000000, prev_blockhash: header_129.block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+ let header_130 = BlockHeader { version: 0x20000000, prev_blockhash: header_129.block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
connect_block(&nodes[0], &Block { header: header_130, txdata: penalty_txn });
- let header_131 = BlockHeader { version: 0x20000000, prev_blockhash: header_130.block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+ let header_131 = BlockHeader { version: 0x20000000, prev_blockhash: header_130.block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
connect_block(&nodes[0], &Block { header: header_131, txdata: Vec::new() });
{
let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
txn
};
// Broadcast claim txn and confirm blocks to avoid further bumps on these outputs
- let header_145 = BlockHeader { version: 0x20000000, prev_blockhash: header_144, merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+ let header_145 = BlockHeader { version: 0x20000000, prev_blockhash: header_144, merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
connect_block(&nodes[0], &Block { header: header_145, txdata: node_txn });
connect_blocks(&nodes[0], 20);
{
node_txn.clear();
penalty_txn
};
- let header_130 = BlockHeader { version: 0x20000000, prev_blockhash: nodes[0].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+ let header_130 = BlockHeader { version: 0x20000000, prev_blockhash: nodes[0].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
connect_block(&nodes[0], &Block { header: header_130, txdata: penalty_txn });
connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
{
header: BlockHeader {
version: 0x2000000,
prev_blockhash: node_1_blocks.last().unwrap().0.block_hash(),
- merkle_root: Default::default(),
+ merkle_root: TxMerkleNode::all_zeros(),
time: node_1_blocks.len() as u32 + 7200, bits: 42, nonce: 42 },
txdata: vec![],
}
assert!(watchtower.watch_channel(outpoint, new_monitor).is_ok());
watchtower
};
- let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+ let header = BlockHeader { version: 0x20000000, prev_blockhash: BlockHash::all_zeros(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
let block = Block { header, txdata: vec![] };
// Make the tx_broadcaster aware of enough blocks that it doesn't think we're violating
// transaction lock time requirements here.
assert!(watchtower.watch_channel(outpoint, new_monitor).is_ok());
watchtower
};
- let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+ let header = BlockHeader { version: 0x20000000, prev_blockhash: BlockHash::all_zeros(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
let block = Block { header, txdata: vec![] };
// Make the tx_broadcaster aware of enough blocks that it doesn't think we're violating
// transaction lock time requirements here.
assert!(watchtower.watch_channel(outpoint, new_monitor).is_ok());
watchtower
};
- let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+ let header = BlockHeader { version: 0x20000000, prev_blockhash: BlockHash::all_zeros(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
watchtower_bob.chain_monitor.block_connected(&Block { header, txdata: vec![] }, CHAN_CONFIRM_DEPTH + TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS);
// Route another payment to generate another update while the previous HTLC is still pending
check_added_monitors!(nodes[0], 1);
// Provide one more block to watchtower Bob, expect broadcast of commitment and HTLC-Timeout
- let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+ let header = BlockHeader { version: 0x20000000, prev_blockhash: BlockHash::all_zeros(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
watchtower_bob.chain_monitor.block_connected(&Block { header, txdata: vec![] }, CHAN_CONFIRM_DEPTH + 1 + TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS);
// Watchtower Bob should have broadcast a commitment/HTLC-timeout
};
// We confirm Bob's state Y on Alice; she should broadcast an HTLC-timeout
- let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+ let header = BlockHeader { version: 0x20000000, prev_blockhash: BlockHash::all_zeros(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
watchtower_alice.chain_monitor.block_connected(&Block { header, txdata: vec![bob_state_y.clone()] }, CHAN_CONFIRM_DEPTH + 2 + TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS);
{
let htlc_txn = chanmon_cfgs[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
check_spends!(local_txn[0], chan_1.3);
// Timeout the HTLC on A's chain so it can generate an HTLC-Timeout tx
- let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[0].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+ let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[0].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
connect_block(&nodes[0], &Block { header, txdata: vec![local_txn[0].clone()] });
// We deliberately connect the local tx twice as this would provoke a failure if this test
// were run before the #653 fix.
node_txn[1].clone()
};
- let header_201 = BlockHeader { version: 0x20000000, prev_blockhash: nodes[0].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+ let header_201 = BlockHeader { version: 0x20000000, prev_blockhash: nodes[0].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
connect_block(&nodes[0], &Block { header: header_201, txdata: vec![htlc_timeout.clone()] });
connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
expect_payment_failed!(nodes[0], our_payment_hash, true);
true => alice_txn.clone(),
false => get_local_commitment_txn!(nodes[1], chan_ab.2)
};
- let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42};
+ let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42};
connect_block(&nodes[1], &Block { header, txdata: vec![txn_to_broadcast[0].clone()]});
let mut bob_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
if broadcast_alice {
let mut txn_to_broadcast = alice_txn.clone();
if !broadcast_alice { txn_to_broadcast = get_local_commitment_txn!(nodes[1], chan_ab.2); }
if !go_onchain_before_fulfill {
- let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42};
+ let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42};
connect_block(&nodes[1], &Block { header, txdata: vec![txn_to_broadcast[0].clone()]});
// If Bob was the one to force-close, he will have already passed these checks earlier.
if broadcast_alice {
// long the ChannelMonitor will try to read 32 bytes from the second-to-last element, panicking
// as it's not 32 bytes long.
let mut spend_tx = Transaction {
- version: 2i32, lock_time: 0,
+ version: 2i32, lock_time: PackedLockTime::ZERO,
input: tx.output.iter().enumerate().map(|(idx, _)| TxIn {
previous_output: BitcoinOutPoint {
txid: tx.txid(),
vout: idx as u32,
},
script_sig: Script::new(),
- sequence: 0xfffffffd,
+ sequence: Sequence::ENABLE_RBF_NO_LOCKTIME,
witness: Witness::from_vec(channelmonitor::deliberately_bogus_accepted_htlc_witness())
}).collect(),
output: vec![TxOut {
let chan_id = *nodes[0].network_chan_count.borrow();
let events = nodes[0].node.get_and_clear_pending_events();
- let input = TxIn { previous_output: BitcoinOutPoint::null(), script_sig: bitcoin::Script::new(), sequence: 0x1, witness: Witness::from_vec(vec!(vec!(1))) };
+ let input = TxIn { previous_output: BitcoinOutPoint::null(), script_sig: bitcoin::Script::new(), sequence: Sequence(1), witness: Witness::from_vec(vec!(vec!(1))) };
assert_eq!(events.len(), 1);
let mut tx = match events[0] {
Event::FundingGenerationReady { ref channel_value_satoshis, ref output_script, .. } => {
// Timelock the transaction _beyond_ the best client height + 2.
- Transaction { version: chan_id as i32, lock_time: best_height + 3, input: vec![input], output: vec![TxOut {
+ Transaction { version: chan_id as i32, lock_time: PackedLockTime(best_height + 3), input: vec![input], output: vec![TxOut {
value: *channel_value_satoshis, script_pubkey: output_script.clone(),
}]}
},
}
// However, transaction should be accepted if it's in a +2 headroom from best block.
- tx.lock_time -= 1;
+ tx.lock_time = PackedLockTime(tx.lock_time.0 - 1);
assert!(nodes[0].node.funding_transaction_generated(&temp_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).is_ok());
get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
}
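The `.0` accesses and `PackedLockTime(..)` constructions used throughout these tests come from rust-bitcoin 0.29 wrapping the raw consensus fields in newtypes. A small sketch of the ergonomics being assumed:

```rust
use bitcoin::{PackedLockTime, Sequence};

fn main() {
    // PackedLockTime has no arithmetic impls, so adjust via the inner field.
    let mut lock_time = PackedLockTime(103);
    lock_time = PackedLockTime(lock_time.0 - 1);
    assert_eq!(lock_time.0, 102);

    // Named constants replace magic numbers like 0xffffffff / 0xfffffffd.
    assert_eq!(Sequence::MAX.0, 0xffff_ffff);
    assert_eq!(Sequence::ENABLE_RBF_NO_LOCKTIME.0, 0xffff_fffd);
}
```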
use chain::channelmonitor::{ANTI_REORG_DELAY, Balance};
use chain::transaction::OutPoint;
+use chain::chaininterface::LowerBoundedFeeEstimator;
use ln::channel;
use ln::channelmanager::BREAKDOWN_TIMEOUT;
use ln::features::InitFeatures;
v
}
+/// Asserts that `a` and `b` are close, but may differ by up to 5.
+/// This is useful when checking transaction fees and weights, which can vary by a few units
+/// because signature sizes, and thus signature-size estimates, are not exact.
+fn fuzzy_assert_eq<V: core::convert::TryInto<u64>>(a: V, b: V) {
+ let a_u64 = a.try_into().map_err(|_| ()).unwrap();
+ let b_u64 = b.try_into().map_err(|_| ()).unwrap();
+ eprintln!("Checking {} and {} for fuzzy equality", a_u64, b_u64);
+ assert!(a_u64 >= b_u64 - 5);
+ assert!(b_u64 >= a_u64 - 5);
+}
+
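For instance, with illustrative values:

```rust
fuzzy_assert_eq(573u64, 571u64); // passes: within the +/-5 tolerance
fuzzy_assert_eq(571u64, 573u64); // the check is symmetric
// fuzzy_assert_eq(580u64, 571u64) would panic: off by more than 5.
```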
fn do_test_claim_value_force_close(prev_commitment_tx: bool) {
// Tests `get_claimable_balances` with an HTLC across a force-close.
// We build a channel with an HTLC pending, then force close the channel and check that the
assert_eq!(sorted_vec(vec![Balance::ClaimableOnChannelClose {
claimable_amount_satoshis: 1_000_000 - 3_000 - 4_000 - 1_000 - 3 - chan_feerate *
(channel::commitment_tx_base_weight(opt_anchors) + 2 * channel::COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000,
- }, Balance::MaybeClaimableHTLCAwaitingTimeout {
+ }, Balance::MaybeTimeoutClaimableHTLC {
claimable_amount_satoshis: 3_000,
claimable_height: htlc_cltv_timeout,
- }, Balance::MaybeClaimableHTLCAwaitingTimeout {
+ }, Balance::MaybeTimeoutClaimableHTLC {
claimable_amount_satoshis: 4_000,
claimable_height: htlc_cltv_timeout,
}]),
sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()));
- assert_eq!(vec![Balance::ClaimableOnChannelClose {
+ assert_eq!(sorted_vec(vec![Balance::ClaimableOnChannelClose {
claimable_amount_satoshis: 1_000,
- }],
- nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances());
+ }, Balance::MaybePreimageClaimableHTLC {
+ claimable_amount_satoshis: 3_000,
+ expiry_height: htlc_cltv_timeout,
+ }, Balance::MaybePreimageClaimableHTLC {
+ claimable_amount_satoshis: 4_000,
+ expiry_height: htlc_cltv_timeout,
+ }]),
+ sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()));
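The renamed and newly-added `Balance` variants carry the fields asserted above; a hedged sketch of matching on them (`describe` is an illustrative helper, not LDK API):

```rust
use lightning::chain::channelmonitor::Balance;

fn describe(balance: &Balance) -> String {
    match balance {
        Balance::MaybeTimeoutClaimableHTLC { claimable_amount_satoshis, claimable_height } =>
            format!("{} sats reclaimable via timeout at height {}",
                claimable_amount_satoshis, claimable_height),
        Balance::MaybePreimageClaimableHTLC { claimable_amount_satoshis, expiry_height } =>
            format!("{} sats claimable with the preimage until height {}",
                claimable_amount_satoshis, expiry_height),
        _ => "other balance".to_owned(),
    }
}
```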
nodes[1].node.claim_funds(payment_preimage);
check_added_monitors!(nodes[1], 1);
chan_feerate * (channel::commitment_tx_base_weight(opt_anchors) +
if prev_commitment_tx { 1 } else { 2 } *
channel::COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000,
- }, Balance::MaybeClaimableHTLCAwaitingTimeout {
+ }, Balance::MaybeTimeoutClaimableHTLC {
claimable_amount_satoshis: 4_000,
claimable_height: htlc_cltv_timeout,
}];
if !prev_commitment_tx {
- a_expected_balances.push(Balance::MaybeClaimableHTLCAwaitingTimeout {
+ a_expected_balances.push(Balance::MaybeTimeoutClaimableHTLC {
claimable_amount_satoshis: 3_000,
claimable_height: htlc_cltv_timeout,
});
claimable_amount_satoshis: 1_000_000 - 3_000 - 4_000 - 1_000 - 3 - chan_feerate *
(channel::commitment_tx_base_weight(opt_anchors) + 2 * channel::COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000,
confirmation_height: nodes[0].best_block_info().1 + ANTI_REORG_DELAY - 1,
- }, Balance::MaybeClaimableHTLCAwaitingTimeout {
+ }, Balance::MaybeTimeoutClaimableHTLC {
claimable_amount_satoshis: 3_000,
claimable_height: htlc_cltv_timeout,
- }, Balance::MaybeClaimableHTLCAwaitingTimeout {
+ }, Balance::MaybeTimeoutClaimableHTLC {
claimable_amount_satoshis: 4_000,
claimable_height: htlc_cltv_timeout,
}]),
// After ANTI_REORG_DELAY, A will consider its balance fully spendable and generate a
// `SpendableOutputs` event. However, B still has to wait for the CSV delay.
- assert_eq!(sorted_vec(vec![Balance::MaybeClaimableHTLCAwaitingTimeout {
+ assert_eq!(sorted_vec(vec![Balance::MaybeTimeoutClaimableHTLC {
claimable_amount_satoshis: 3_000,
claimable_height: htlc_cltv_timeout,
- }, Balance::MaybeClaimableHTLCAwaitingTimeout {
+ }, Balance::MaybeTimeoutClaimableHTLC {
claimable_amount_satoshis: 4_000,
claimable_height: htlc_cltv_timeout,
}]),
} else {
expect_payment_sent!(nodes[0], payment_preimage);
}
- assert_eq!(sorted_vec(vec![Balance::MaybeClaimableHTLCAwaitingTimeout {
+ assert_eq!(sorted_vec(vec![Balance::MaybeTimeoutClaimableHTLC {
claimable_amount_satoshis: 3_000,
claimable_height: htlc_cltv_timeout,
- }, Balance::MaybeClaimableHTLCAwaitingTimeout {
+ }, Balance::MaybeTimeoutClaimableHTLC {
claimable_amount_satoshis: 4_000,
claimable_height: htlc_cltv_timeout,
}]),
sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()));
connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
- assert_eq!(vec![Balance::MaybeClaimableHTLCAwaitingTimeout {
+ assert_eq!(vec![Balance::MaybeTimeoutClaimableHTLC {
claimable_amount_satoshis: 4_000,
claimable_height: htlc_cltv_timeout,
}],
claimable_amount_satoshis: 1_000_000 - 10_000 - 20_000 - chan_feerate *
(channel::commitment_tx_base_weight(opt_anchors) + 2 * channel::COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000,
confirmation_height: node_a_commitment_claimable,
- }, Balance::MaybeClaimableHTLCAwaitingTimeout {
+ }, Balance::MaybeTimeoutClaimableHTLC {
claimable_amount_satoshis: 10_000,
claimable_height: htlc_cltv_timeout,
- }, Balance::MaybeClaimableHTLCAwaitingTimeout {
+ }, Balance::MaybeTimeoutClaimableHTLC {
claimable_amount_satoshis: 20_000,
claimable_height: htlc_cltv_timeout,
}]),
claimable_amount_satoshis: 1_000_000 - 10_000 - 20_000 - chan_feerate *
(channel::commitment_tx_base_weight(opt_anchors) + 2 * channel::COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000,
confirmation_height: node_a_commitment_claimable,
- }, Balance::MaybeClaimableHTLCAwaitingTimeout {
+ }, Balance::MaybeTimeoutClaimableHTLC {
claimable_amount_satoshis: 10_000,
claimable_height: htlc_cltv_timeout,
- }, Balance::MaybeClaimableHTLCAwaitingTimeout {
+ }, Balance::MaybeTimeoutClaimableHTLC {
claimable_amount_satoshis: 20_000,
claimable_height: htlc_cltv_timeout,
}]),
sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()));
- assert_eq!(as_txn[1].lock_time, nodes[0].best_block_info().1 + 1); // as_txn[1] can be included in the next block
+ assert_eq!(as_txn[1].lock_time.0, nodes[0].best_block_info().1 + 1); // as_txn[1] can be included in the next block
// Now confirm nodes[0]'s HTLC-Timeout transaction, which changes the claimable balance to an
// "awaiting confirmations" one.
}, Balance::ClaimableAwaitingConfirmations {
claimable_amount_satoshis: 10_000,
confirmation_height: node_a_htlc_claimable,
- }, Balance::MaybeClaimableHTLCAwaitingTimeout {
+ }, Balance::MaybeTimeoutClaimableHTLC {
claimable_amount_satoshis: 20_000,
claimable_height: htlc_cltv_timeout,
}]),
}, Balance::ClaimableAwaitingConfirmations {
claimable_amount_satoshis: 10_000,
confirmation_height: node_a_htlc_claimable,
- }, Balance::MaybeClaimableHTLCAwaitingTimeout {
+ }, Balance::MaybeTimeoutClaimableHTLC {
claimable_amount_satoshis: 20_000,
claimable_height: htlc_cltv_timeout,
}]),
assert!(nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances().is_empty());
test_spendable_output(&nodes[0], &as_txn[1]);
}
+
+#[test]
+fn test_no_preimage_inbound_htlc_balances() {
+ // Tests that `MaybePreimageClaimableHTLC` balances are generated for inbound HTLCs for which
+ // we do not have the preimage.
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+ let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+ let (_, _, chan_id, funding_tx) = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 500_000_000, InitFeatures::known(), InitFeatures::known());
+ let funding_outpoint = OutPoint { txid: funding_tx.txid(), index: 0 };
+
+ // Send two HTLCs, one from A to B, and one from B to A.
+ let to_b_failed_payment_hash = route_payment(&nodes[0], &[&nodes[1]], 10_000_000).1;
+ let to_a_failed_payment_hash = route_payment(&nodes[1], &[&nodes[0]], 20_000_000).1;
+ let htlc_cltv_timeout = nodes[0].best_block_info().1 + TEST_FINAL_CLTV + 1; // Note ChannelManager adds one to CLTV timeouts for safety
+
+ let chan_feerate = get_feerate!(nodes[0], chan_id) as u64;
+ let opt_anchors = get_opt_anchors!(nodes[0], chan_id);
+
+ // Both A and B will have an HTLC that's claimable on timeout and one that's claimable if they
+ // receive the preimage. These will remain the same through the channel closure and until the
+ // HTLC output is spent.
+
+ assert_eq!(sorted_vec(vec![Balance::ClaimableOnChannelClose {
+ claimable_amount_satoshis: 1_000_000 - 500_000 - 10_000 - chan_feerate *
+ (channel::commitment_tx_base_weight(opt_anchors) + 2 * channel::COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000,
+ }, Balance::MaybePreimageClaimableHTLC {
+ claimable_amount_satoshis: 20_000,
+ expiry_height: htlc_cltv_timeout,
+ }, Balance::MaybeTimeoutClaimableHTLC {
+ claimable_amount_satoshis: 10_000,
+ claimable_height: htlc_cltv_timeout,
+ }]),
+ sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()));
+
+ assert_eq!(sorted_vec(vec![Balance::ClaimableOnChannelClose {
+ claimable_amount_satoshis: 500_000 - 20_000,
+ }, Balance::MaybePreimageClaimableHTLC {
+ claimable_amount_satoshis: 10_000,
+ expiry_height: htlc_cltv_timeout,
+ }, Balance::MaybeTimeoutClaimableHTLC {
+ claimable_amount_satoshis: 20_000,
+ claimable_height: htlc_cltv_timeout,
+ }]),
+ sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()));
+
+ // Get nodes[0]'s commitment transaction and HTLC-Timeout transaction
+ let as_txn = get_local_commitment_txn!(nodes[0], chan_id);
+ assert_eq!(as_txn.len(), 2);
+ check_spends!(as_txn[1], as_txn[0]);
+ check_spends!(as_txn[0], funding_tx);
+
+ // Now close the channel by confirming A's commitment transaction on both nodes, checking that
+ // the claimable balances remain the same except that the non-HTLC balance switches variant.
+ let node_a_commitment_claimable = nodes[0].best_block_info().1 + BREAKDOWN_TIMEOUT as u32;
+ let as_pre_spend_claims = sorted_vec(vec![Balance::ClaimableAwaitingConfirmations {
+ claimable_amount_satoshis: 1_000_000 - 500_000 - 10_000 - chan_feerate *
+ (channel::commitment_tx_base_weight(opt_anchors) + 2 * channel::COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000,
+ confirmation_height: node_a_commitment_claimable,
+ }, Balance::MaybePreimageClaimableHTLC {
+ claimable_amount_satoshis: 20_000,
+ expiry_height: htlc_cltv_timeout,
+ }, Balance::MaybeTimeoutClaimableHTLC {
+ claimable_amount_satoshis: 10_000,
+ claimable_height: htlc_cltv_timeout,
+ }]);
+
+ mine_transaction(&nodes[0], &as_txn[0]);
+ nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clear();
+ check_added_monitors!(nodes[0], 1);
+ check_closed_broadcast!(nodes[0], true);
+ check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
+
+ assert_eq!(as_pre_spend_claims,
+ sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()));
+
+ mine_transaction(&nodes[1], &as_txn[0]);
+ check_added_monitors!(nodes[1], 1);
+ check_closed_broadcast!(nodes[1], true);
+ check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
+
+ let node_b_commitment_claimable = nodes[1].best_block_info().1 + ANTI_REORG_DELAY - 1;
+ let mut bs_pre_spend_claims = sorted_vec(vec![Balance::ClaimableAwaitingConfirmations {
+ claimable_amount_satoshis: 500_000 - 20_000,
+ confirmation_height: node_b_commitment_claimable,
+ }, Balance::MaybePreimageClaimableHTLC {
+ claimable_amount_satoshis: 10_000,
+ expiry_height: htlc_cltv_timeout,
+ }, Balance::MaybeTimeoutClaimableHTLC {
+ claimable_amount_satoshis: 20_000,
+ claimable_height: htlc_cltv_timeout,
+ }]);
+ assert_eq!(bs_pre_spend_claims,
+ sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()));
+
+ // We'll broadcast the HTLC-Timeout transaction one block prior to the HTLC's expiration (as
+ // it is confirmable in the next block), but the same claimable balances will still be listed
+ // since no HTLC output has been spent, even after the HTLC expires. We'll also fail the
+ // inbound HTLC, but that won't do anything as the channel is already closed.
+
+ connect_blocks(&nodes[0], TEST_FINAL_CLTV - 1);
+ let as_htlc_timeout_claim = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+ assert_eq!(as_htlc_timeout_claim.len(), 1);
+ check_spends!(as_htlc_timeout_claim[0], as_txn[0]);
+ expect_pending_htlcs_forwardable_conditions!(nodes[0],
+ [HTLCDestination::FailedPayment { payment_hash: to_a_failed_payment_hash }]);
+
+ assert_eq!(as_pre_spend_claims,
+ sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()));
+
+ connect_blocks(&nodes[0], 1);
+ assert_eq!(as_pre_spend_claims,
+ sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()));
+
+ // For node B, we'll get the non-HTLC funds claimable after ANTI_REORG_DELAY confirmations
+ connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
+ test_spendable_output(&nodes[1], &as_txn[0]);
+ bs_pre_spend_claims.retain(|e| if let Balance::ClaimableAwaitingConfirmations { .. } = e { false } else { true });
+
+ // The next few blocks for B look the same as for A, though for the opposite HTLC
+ nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clear();
+ connect_blocks(&nodes[1], TEST_FINAL_CLTV - (ANTI_REORG_DELAY - 1) - 1);
+ expect_pending_htlcs_forwardable_conditions!(nodes[1],
+ [HTLCDestination::FailedPayment { payment_hash: to_b_failed_payment_hash }]);
+ let bs_htlc_timeout_claim = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+ assert_eq!(bs_htlc_timeout_claim.len(), 1);
+ check_spends!(bs_htlc_timeout_claim[0], as_txn[0]);
+
+ assert_eq!(bs_pre_spend_claims,
+ sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()));
+
+ connect_blocks(&nodes[1], 1);
+ assert_eq!(bs_pre_spend_claims,
+ sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()));
+
+ // Now confirm the two HTLC timeout transactions for A, checking that the inbound HTLC resolves
+ // after ANTI_REORG_DELAY confirmations and the other takes BREAKDOWN_TIMEOUT confirmations.
+ mine_transaction(&nodes[0], &as_htlc_timeout_claim[0]);
+ let as_timeout_claimable_height = nodes[0].best_block_info().1 + (BREAKDOWN_TIMEOUT as u32) - 1;
+ assert_eq!(sorted_vec(vec![Balance::ClaimableAwaitingConfirmations {
+ claimable_amount_satoshis: 1_000_000 - 500_000 - 10_000 - chan_feerate *
+ (channel::commitment_tx_base_weight(opt_anchors) + 2 * channel::COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000,
+ confirmation_height: node_a_commitment_claimable,
+ }, Balance::MaybePreimageClaimableHTLC {
+ claimable_amount_satoshis: 20_000,
+ expiry_height: htlc_cltv_timeout,
+ }, Balance::ClaimableAwaitingConfirmations {
+ claimable_amount_satoshis: 10_000,
+ confirmation_height: as_timeout_claimable_height,
+ }]),
+ sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()));
+
+ mine_transaction(&nodes[0], &bs_htlc_timeout_claim[0]);
+ assert_eq!(sorted_vec(vec![Balance::ClaimableAwaitingConfirmations {
+ claimable_amount_satoshis: 1_000_000 - 500_000 - 10_000 - chan_feerate *
+ (channel::commitment_tx_base_weight(opt_anchors) + 2 * channel::COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000,
+ confirmation_height: node_a_commitment_claimable,
+ }, Balance::MaybePreimageClaimableHTLC {
+ claimable_amount_satoshis: 20_000,
+ expiry_height: htlc_cltv_timeout,
+ }, Balance::ClaimableAwaitingConfirmations {
+ claimable_amount_satoshis: 10_000,
+ confirmation_height: as_timeout_claimable_height,
+ }]),
+ sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()));
+
+ // Once as_htlc_timeout_claim[0] reaches ANTI_REORG_DELAY confirmations, we should get a
+ // payment failure event.
+ connect_blocks(&nodes[0], ANTI_REORG_DELAY - 2);
+ expect_payment_failed!(nodes[0], to_b_failed_payment_hash, true);
+
+ connect_blocks(&nodes[0], 1);
+ assert_eq!(sorted_vec(vec![Balance::ClaimableAwaitingConfirmations {
+ claimable_amount_satoshis: 1_000_000 - 500_000 - 10_000 - chan_feerate *
+ (channel::commitment_tx_base_weight(opt_anchors) + 2 * channel::COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000,
+ confirmation_height: node_a_commitment_claimable,
+ }, Balance::ClaimableAwaitingConfirmations {
+ claimable_amount_satoshis: 10_000,
+ confirmation_height: core::cmp::max(as_timeout_claimable_height, htlc_cltv_timeout),
+ }]),
+ sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()));
+
+ connect_blocks(&nodes[0], node_a_commitment_claimable - nodes[0].best_block_info().1);
+ assert_eq!(vec![Balance::ClaimableAwaitingConfirmations {
+ claimable_amount_satoshis: 10_000,
+ confirmation_height: core::cmp::max(as_timeout_claimable_height, htlc_cltv_timeout),
+ }],
+ nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances());
+ test_spendable_output(&nodes[0], &as_txn[0]);
+
+ connect_blocks(&nodes[0], as_timeout_claimable_height - nodes[0].best_block_info().1);
+ assert!(nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances().is_empty());
+ test_spendable_output(&nodes[0], &as_htlc_timeout_claim[0]);
+
+ // The process for B should be completely identical as well, noting that the non-HTLC-balance
+ // was already claimed.
+ mine_transaction(&nodes[1], &bs_htlc_timeout_claim[0]);
+ let bs_timeout_claimable_height = nodes[1].best_block_info().1 + ANTI_REORG_DELAY - 1;
+ assert_eq!(sorted_vec(vec![Balance::MaybePreimageClaimableHTLC {
+ claimable_amount_satoshis: 10_000,
+ expiry_height: htlc_cltv_timeout,
+ }, Balance::ClaimableAwaitingConfirmations {
+ claimable_amount_satoshis: 20_000,
+ confirmation_height: bs_timeout_claimable_height,
+ }]),
+ sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()));
+
+ mine_transaction(&nodes[1], &as_htlc_timeout_claim[0]);
+ assert_eq!(sorted_vec(vec![Balance::MaybePreimageClaimableHTLC {
+ claimable_amount_satoshis: 10_000,
+ expiry_height: htlc_cltv_timeout,
+ }, Balance::ClaimableAwaitingConfirmations {
+ claimable_amount_satoshis: 20_000,
+ confirmation_height: bs_timeout_claimable_height,
+ }]),
+ sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()));
+
+ connect_blocks(&nodes[1], ANTI_REORG_DELAY - 2);
+ expect_payment_failed!(nodes[1], to_a_failed_payment_hash, true);
+
+ assert_eq!(vec![Balance::MaybePreimageClaimableHTLC {
+ claimable_amount_satoshis: 10_000,
+ expiry_height: htlc_cltv_timeout,
+ }],
+ nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances());
+ test_spendable_output(&nodes[1], &bs_htlc_timeout_claim[0]);
+
+ connect_blocks(&nodes[1], 1);
+ assert!(nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances().is_empty());
+}
+
+fn sorted_vec_with_additions<T: Ord + Clone>(v_orig: &Vec<T>, extra_ts: &[&T]) -> Vec<T> {
+ let mut v = v_orig.clone();
+ for t in extra_ts {
+ v.push((*t).clone());
+ }
+ v.sort_unstable();
+ v
+}
+
+fn do_test_revoked_counterparty_commitment_balances(confirm_htlc_spend_first: bool) {
+ // Tests `get_claimable_balances` for revoked counterparty commitment transactions.
+ let mut chanmon_cfgs = create_chanmon_cfgs(2);
+ // We broadcast a second-to-latest commitment transaction, without providing the revocation
+ // secret to the counterparty. However, because we always immediately take the revocation
+ // secret from the keys_manager, we would panic at broadcast as we're trying to sign a
+ // transaction which, from the point of view of our keys_manager, is revoked.
+ chanmon_cfgs[1].keys_manager.disable_revocation_policy_check = true;
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+ let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+ let (_, _, chan_id, funding_tx) =
+ create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 100_000_000, InitFeatures::known(), InitFeatures::known());
+ let funding_outpoint = OutPoint { txid: funding_tx.txid(), index: 0 };
+ assert_eq!(funding_outpoint.to_channel_id(), chan_id);
+
+ // We create five HTLCs for B to claim against A's revoked commitment transaction:
+ //
+ // (1) one for which A is the originator and B knows the preimage
+ // (2) one for which B is the originator where the HTLC has since timed-out
+ // (3) one for which B is the originator but where the HTLC has not yet timed-out
+ // (4) one dust HTLC which is lost in the channel closure
+ // (5) one that actually isn't in the revoked commitment transaction at all, but was added in
+ // later commitment transaction updates
+ //
+ // Though they could all be claimed in a single claim transaction, due to CLTV timeouts they
+ // are currently claimed in separate transactions, which aids testing since we can claim the
+ // HTLCs individually.
+
+ let (claimed_payment_preimage, claimed_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 3_000_000);
+ let timeout_payment_hash = route_payment(&nodes[1], &[&nodes[0]], 4_000_000).1;
+ let dust_payment_hash = route_payment(&nodes[1], &[&nodes[0]], 3_000).1;
+
+ let htlc_cltv_timeout = nodes[0].best_block_info().1 + TEST_FINAL_CLTV + 1; // Note ChannelManager adds one to CLTV timeouts for safety
+
+ connect_blocks(&nodes[0], 10);
+ connect_blocks(&nodes[1], 10);
+
+ let live_htlc_cltv_timeout = nodes[0].best_block_info().1 + TEST_FINAL_CLTV + 1; // Note ChannelManager adds one to CLTV timeouts for safety
+ let live_payment_hash = route_payment(&nodes[1], &[&nodes[0]], 5_000_000).1;
+
+ // Get the latest commitment transaction from A and then update the fee to revoke it
+ let as_revoked_txn = get_local_commitment_txn!(nodes[0], chan_id);
+ let opt_anchors = get_opt_anchors!(nodes[0], chan_id);
+
+ let chan_feerate = get_feerate!(nodes[0], chan_id) as u64;
+
+ let missing_htlc_cltv_timeout = nodes[0].best_block_info().1 + TEST_FINAL_CLTV + 1; // Note ChannelManager adds one to CLTV timeouts for safety
+ let missing_htlc_payment_hash = route_payment(&nodes[1], &[&nodes[0]], 2_000_000).1;
+
+ nodes[1].node.claim_funds(claimed_payment_preimage);
+ expect_payment_claimed!(nodes[1], claimed_payment_hash, 3_000_000);
+ check_added_monitors!(nodes[1], 1);
+ let _b_htlc_msgs = get_htlc_update_msgs!(&nodes[1], nodes[0].node.get_our_node_id());
+
+ connect_blocks(&nodes[0], htlc_cltv_timeout + 1 - 10);
+ check_closed_broadcast!(nodes[0], true);
+ check_added_monitors!(nodes[0], 1);
+
+ let mut events = nodes[0].node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 6);
+ let mut failed_payments: HashSet<_> =
+ [timeout_payment_hash, dust_payment_hash, live_payment_hash, missing_htlc_payment_hash]
+ .iter().map(|a| *a).collect();
+ events.retain(|ev| {
+ match ev {
+ Event::HTLCHandlingFailed { failed_next_destination: HTLCDestination::NextHopChannel { node_id, channel_id }, .. } => {
+ assert_eq!(*channel_id, chan_id);
+ assert_eq!(*node_id, Some(nodes[1].node.get_our_node_id()));
+ false
+ },
+ Event::HTLCHandlingFailed { failed_next_destination: HTLCDestination::FailedPayment { payment_hash }, .. } => {
+ assert!(failed_payments.remove(payment_hash));
+ false
+ },
+ _ => true,
+ }
+ });
+ assert!(failed_payments.is_empty());
+ if let Event::PendingHTLCsForwardable { .. } = events[0] {} else { panic!(); }
+ match &events[1] {
+ Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {},
+ _ => panic!(),
+ }
+
+ connect_blocks(&nodes[1], htlc_cltv_timeout + 1 - 10);
+ check_closed_broadcast!(nodes[1], true);
+ check_added_monitors!(nodes[1], 1);
+ check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
+
+ // Prior to channel closure, B considers the preimage HTLC as its own, and otherwise only
+ // lists the two on-chain timeout-able HTLCs as claimable balances.
+ assert_eq!(sorted_vec(vec![Balance::ClaimableOnChannelClose {
+ claimable_amount_satoshis: 100_000 - 5_000 - 4_000 - 3 - 2_000 + 3_000,
+ }, Balance::MaybeTimeoutClaimableHTLC {
+ claimable_amount_satoshis: 2_000,
+ claimable_height: missing_htlc_cltv_timeout,
+ }, Balance::MaybeTimeoutClaimableHTLC {
+ claimable_amount_satoshis: 4_000,
+ claimable_height: htlc_cltv_timeout,
+ }, Balance::MaybeTimeoutClaimableHTLC {
+ claimable_amount_satoshis: 5_000,
+ claimable_height: live_htlc_cltv_timeout,
+ }]),
+ sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()));
+
+ mine_transaction(&nodes[1], &as_revoked_txn[0]);
+ let mut claim_txn: Vec<_> = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().drain(..).filter(|tx| tx.input.iter().any(|inp| inp.previous_output.txid == as_revoked_txn[0].txid())).collect();
+ // Currently the revoked commitment is claimed in four transactions as the HTLCs all expire
+ // quite soon.
+ assert_eq!(claim_txn.len(), 4);
+ claim_txn.sort_unstable_by_key(|tx| tx.output.iter().map(|output| output.value).sum::<u64>());
+
+ // The following constants were determined experimentally
+ const BS_TO_SELF_CLAIM_EXP_WEIGHT: usize = 483;
+ const OUTBOUND_HTLC_CLAIM_EXP_WEIGHT: usize = 571;
+ const INBOUND_HTLC_CLAIM_EXP_WEIGHT: usize = 578;
+
+ // Check that the weight is close to the expected weight. Note that signature sizes vary
+ // somewhat so it may not always be exact.
+ fuzzy_assert_eq(claim_txn[0].weight(), OUTBOUND_HTLC_CLAIM_EXP_WEIGHT);
+ fuzzy_assert_eq(claim_txn[1].weight(), INBOUND_HTLC_CLAIM_EXP_WEIGHT);
+ fuzzy_assert_eq(claim_txn[2].weight(), INBOUND_HTLC_CLAIM_EXP_WEIGHT);
+ fuzzy_assert_eq(claim_txn[3].weight(), BS_TO_SELF_CLAIM_EXP_WEIGHT);
+
+ // The expected balance for the next three checks, with the largest-HTLC and to_self output
+ // claim balances separated out.
+ let expected_balance = vec![Balance::ClaimableAwaitingConfirmations {
+ // to_remote output in A's revoked commitment
+ claimable_amount_satoshis: 100_000 - 5_000 - 4_000 - 3,
+ confirmation_height: nodes[1].best_block_info().1 + 5,
+ }, Balance::CounterpartyRevokedOutputClaimable {
+ claimable_amount_satoshis: 3_000,
+ }, Balance::CounterpartyRevokedOutputClaimable {
+ claimable_amount_satoshis: 4_000,
+ }];
+
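+ // A's to_self output: the 1M channel value, less B's 100k, less the 3k HTLC A itself offered,
+ // less the commitment fee. `chan_feerate` is in sats per 1000 weight units, and only the three
+ // non-dust HTLCs (3k, 4k, and 5k) add `COMMITMENT_TX_WEIGHT_PER_HTLC` each.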
+ let to_self_unclaimed_balance = Balance::CounterpartyRevokedOutputClaimable {
+ claimable_amount_satoshis: 1_000_000 - 100_000 - 3_000 - chan_feerate *
+ (channel::commitment_tx_base_weight(opt_anchors) + 3 * channel::COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000,
+ };
+ let to_self_claimed_avail_height;
+ let largest_htlc_unclaimed_balance = Balance::CounterpartyRevokedOutputClaimable {
+ claimable_amount_satoshis: 5_000,
+ };
+ let largest_htlc_claimed_avail_height;
+
+ // Once the channel has been closed by A, B now considers all of the commitment transactions'
+ // outputs as `CounterpartyRevokedOutputClaimable`.
+ assert_eq!(sorted_vec_with_additions(&expected_balance, &[&to_self_unclaimed_balance, &largest_htlc_unclaimed_balance]),
+ sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()));
+
+ if confirm_htlc_spend_first {
+ mine_transaction(&nodes[1], &claim_txn[2]);
+ largest_htlc_claimed_avail_height = nodes[1].best_block_info().1 + 5;
+ to_self_claimed_avail_height = nodes[1].best_block_info().1 + 6; // will be claimed in the next block
+ } else {
+ // Connect the to_self output claim, taking all of A's non-HTLC funds
+ mine_transaction(&nodes[1], &claim_txn[3]);
+ to_self_claimed_avail_height = nodes[1].best_block_info().1 + 5;
+ largest_htlc_claimed_avail_height = nodes[1].best_block_info().1 + 6; // will be claimed in the next block
+ }
+
+ let largest_htlc_claimed_balance = Balance::ClaimableAwaitingConfirmations {
+ claimable_amount_satoshis: 5_000 - chan_feerate * INBOUND_HTLC_CLAIM_EXP_WEIGHT as u64 / 1000,
+ confirmation_height: largest_htlc_claimed_avail_height,
+ };
+ let to_self_claimed_balance = Balance::ClaimableAwaitingConfirmations {
+ claimable_amount_satoshis: 1_000_000 - 100_000 - 3_000 - chan_feerate *
+ (channel::commitment_tx_base_weight(opt_anchors) + 3 * channel::COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000
+ - chan_feerate * claim_txn[3].weight() as u64 / 1000,
+ confirmation_height: to_self_claimed_avail_height,
+ };
+
+ if confirm_htlc_spend_first {
+ assert_eq!(sorted_vec_with_additions(&expected_balance, &[&to_self_unclaimed_balance, &largest_htlc_claimed_balance]),
+ sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()));
+ } else {
+ assert_eq!(sorted_vec_with_additions(&expected_balance, &[&to_self_claimed_balance, &largest_htlc_unclaimed_balance]),
+ sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()));
+ }
+
+ if confirm_htlc_spend_first {
+ mine_transaction(&nodes[1], &claim_txn[3]);
+ } else {
+ mine_transaction(&nodes[1], &claim_txn[2]);
+ }
+ assert_eq!(sorted_vec_with_additions(&expected_balance, &[&to_self_claimed_balance, &largest_htlc_claimed_balance]),
+ sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()));
+
+ // Finally, connect the last two remaining HTLC spends and check that they move to
+ // `ClaimableAwaitingConfirmations`
+ mine_transaction(&nodes[1], &claim_txn[0]);
+ mine_transaction(&nodes[1], &claim_txn[1]);
+
+ assert_eq!(sorted_vec(vec![Balance::ClaimableAwaitingConfirmations {
+ // to_remote output in A's revoked commitment
+ claimable_amount_satoshis: 100_000 - 5_000 - 4_000 - 3,
+ confirmation_height: nodes[1].best_block_info().1 + 1,
+ }, Balance::ClaimableAwaitingConfirmations {
+ claimable_amount_satoshis: 1_000_000 - 100_000 - 3_000 - chan_feerate *
+ (channel::commitment_tx_base_weight(opt_anchors) + 3 * channel::COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000
+ - chan_feerate * claim_txn[3].weight() as u64 / 1000,
+ confirmation_height: to_self_claimed_avail_height,
+ }, Balance::ClaimableAwaitingConfirmations {
+ claimable_amount_satoshis: 3_000 - chan_feerate * OUTBOUND_HTLC_CLAIM_EXP_WEIGHT as u64 / 1000,
+ confirmation_height: nodes[1].best_block_info().1 + 4,
+ }, Balance::ClaimableAwaitingConfirmations {
+ claimable_amount_satoshis: 4_000 - chan_feerate * INBOUND_HTLC_CLAIM_EXP_WEIGHT as u64 / 1000,
+ confirmation_height: nodes[1].best_block_info().1 + 5,
+ }, Balance::ClaimableAwaitingConfirmations {
+ claimable_amount_satoshis: 5_000 - chan_feerate * INBOUND_HTLC_CLAIM_EXP_WEIGHT as u64 / 1000,
+ confirmation_height: largest_htlc_claimed_avail_height,
+ }]),
+ sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()));
+
+ connect_blocks(&nodes[1], 1);
+ test_spendable_output(&nodes[1], &as_revoked_txn[0]);
+
+ let mut payment_failed_events = nodes[1].node.get_and_clear_pending_events();
+ expect_payment_failed_conditions_event(&nodes[1], payment_failed_events.pop().unwrap(),
+ dust_payment_hash, true, PaymentFailedConditions::new());
+ expect_payment_failed_conditions_event(&nodes[1], payment_failed_events.pop().unwrap(),
+ missing_htlc_payment_hash, true, PaymentFailedConditions::new());
+ assert!(payment_failed_events.is_empty());
+
+ connect_blocks(&nodes[1], 1);
+ test_spendable_output(&nodes[1], &claim_txn[if confirm_htlc_spend_first { 2 } else { 3 }]);
+ connect_blocks(&nodes[1], 1);
+ test_spendable_output(&nodes[1], &claim_txn[if confirm_htlc_spend_first { 3 } else { 2 }]);
+ expect_payment_failed!(nodes[1], live_payment_hash, true);
+ connect_blocks(&nodes[1], 1);
+ test_spendable_output(&nodes[1], &claim_txn[0]);
+ connect_blocks(&nodes[1], 1);
+ test_spendable_output(&nodes[1], &claim_txn[1]);
+ expect_payment_failed!(nodes[1], timeout_payment_hash, true);
+ assert_eq!(nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances(), Vec::new());
+}
+
+#[test]
+fn test_revoked_counterparty_commitment_balances() {
+ do_test_revoked_counterparty_commitment_balances(true);
+ do_test_revoked_counterparty_commitment_balances(false);
+}
+
+#[test]
+fn test_revoked_counterparty_htlc_tx_balances() {
+ // Tests `get_claimable_balances` for revocation spends of HTLC transactions.
+ let mut chanmon_cfgs = create_chanmon_cfgs(2);
+ chanmon_cfgs[1].keys_manager.disable_revocation_policy_check = true;
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+ let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+ // Create some initial channels
+ let (_, _, chan_id, funding_tx) =
+ create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 11_000_000, InitFeatures::known(), InitFeatures::known());
+ let funding_outpoint = OutPoint { txid: funding_tx.txid(), index: 0 };
+ assert_eq!(funding_outpoint.to_channel_id(), chan_id);
+
+ let payment_preimage = route_payment(&nodes[0], &[&nodes[1]], 3_000_000).0;
+ let failed_payment_hash = route_payment(&nodes[1], &[&nodes[0]], 1_000_000).1;
+ let revoked_local_txn = get_local_commitment_txn!(nodes[1], chan_id);
+ assert_eq!(revoked_local_txn[0].input.len(), 1);
+ assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, funding_tx.txid());
+
+ // The to-be-revoked commitment tx should have two HTLCs and an output for both sides
+ assert_eq!(revoked_local_txn[0].output.len(), 4);
+
+ claim_payment(&nodes[0], &[&nodes[1]], payment_preimage);
+
+ let chan_feerate = get_feerate!(nodes[0], chan_id) as u64;
+ let opt_anchors = get_opt_anchors!(nodes[0], chan_id);
+
+ // B will generate an HTLC-Success from its revoked commitment tx
+ mine_transaction(&nodes[1], &revoked_local_txn[0]);
+ check_closed_broadcast!(nodes[1], true);
+ check_added_monitors!(nodes[1], 1);
+ check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
+ let revoked_htlc_success_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+
+ assert_eq!(revoked_htlc_success_txn.len(), 2);
+ assert_eq!(revoked_htlc_success_txn[0].input.len(), 1);
+ assert_eq!(revoked_htlc_success_txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
+ check_spends!(revoked_htlc_success_txn[0], revoked_local_txn[0]);
+ check_spends!(revoked_htlc_success_txn[1], funding_tx);
+
+ connect_blocks(&nodes[1], TEST_FINAL_CLTV);
+ let revoked_htlc_timeout_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+ assert_eq!(revoked_htlc_timeout_txn.len(), 1);
+ check_spends!(revoked_htlc_timeout_txn[0], revoked_local_txn[0]);
+ assert_ne!(revoked_htlc_success_txn[0].input[0].previous_output, revoked_htlc_timeout_txn[0].input[0].previous_output);
+ assert_eq!(revoked_htlc_success_txn[0].lock_time.0, 0);
+ assert_ne!(revoked_htlc_timeout_txn[0].lock_time.0, 0);
+
+ // A will generate justice tx from B's revoked commitment/HTLC tx
+ mine_transaction(&nodes[0], &revoked_local_txn[0]);
+ check_closed_broadcast!(nodes[0], true);
+ check_added_monitors!(nodes[0], 1);
+ check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
+ let to_remote_conf_height = nodes[0].best_block_info().1 + ANTI_REORG_DELAY - 1;
+
+ let as_commitment_claim_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+ assert_eq!(as_commitment_claim_txn.len(), 2);
+ check_spends!(as_commitment_claim_txn[0], revoked_local_txn[0]);
+ check_spends!(as_commitment_claim_txn[1], funding_tx);
+
+ // The next two checks have the same balance set for A - even though we confirm a revoked HTLC
+ // transaction, our balance tracking doesn't use the on-chain value, so the
+ // `CounterpartyRevokedOutputClaimable` entry doesn't change.
+ let as_balances = sorted_vec(vec![Balance::ClaimableAwaitingConfirmations {
+ // to_remote output in B's revoked commitment
+ claimable_amount_satoshis: 1_000_000 - 11_000 - 3_000 - chan_feerate *
+ (channel::commitment_tx_base_weight(opt_anchors) + 2 * channel::COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000,
+ confirmation_height: to_remote_conf_height,
+ }, Balance::CounterpartyRevokedOutputClaimable {
+ // to_self output in B's revoked commitment
+ claimable_amount_satoshis: 10_000,
+ }, Balance::CounterpartyRevokedOutputClaimable { // HTLC 1
+ claimable_amount_satoshis: 3_000,
+ }, Balance::CounterpartyRevokedOutputClaimable { // HTLC 2
+ claimable_amount_satoshis: 1_000,
+ }]);
+ assert_eq!(as_balances,
+ sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()));
+
+ mine_transaction(&nodes[0], &revoked_htlc_success_txn[0]);
+ let as_htlc_claim_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+ assert_eq!(as_htlc_claim_tx.len(), 2);
+ check_spends!(as_htlc_claim_tx[0], revoked_htlc_success_txn[0]);
+ check_spends!(as_htlc_claim_tx[1], revoked_local_txn[0]); // A has to generate a new claim for the remaining revoked
+ // outputs (which no longer includes the spent HTLC output)
+
+ assert_eq!(as_balances,
+ sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()));
+
+ assert_eq!(as_htlc_claim_tx[0].output.len(), 1);
+ fuzzy_assert_eq(as_htlc_claim_tx[0].output[0].value,
+ 3_000 - chan_feerate * (revoked_htlc_success_txn[0].weight() + as_htlc_claim_tx[0].weight()) as u64 / 1000);
+
+ mine_transaction(&nodes[0], &as_htlc_claim_tx[0]);
+ assert_eq!(sorted_vec(vec![Balance::ClaimableAwaitingConfirmations {
+ // to_remote output in B's revoked commitment
+ claimable_amount_satoshis: 1_000_000 - 11_000 - 3_000 - chan_feerate *
+ (channel::commitment_tx_base_weight(opt_anchors) + 2 * channel::COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000,
+ confirmation_height: to_remote_conf_height,
+ }, Balance::CounterpartyRevokedOutputClaimable {
+ // to_self output in B's revoked commitment
+ claimable_amount_satoshis: 10_000,
+ }, Balance::CounterpartyRevokedOutputClaimable { // HTLC 2
+ claimable_amount_satoshis: 1_000,
+ }, Balance::ClaimableAwaitingConfirmations {
+ claimable_amount_satoshis: as_htlc_claim_tx[0].output[0].value,
+ confirmation_height: nodes[0].best_block_info().1 + ANTI_REORG_DELAY - 1,
+ }]),
+ sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()));
+
+ connect_blocks(&nodes[0], ANTI_REORG_DELAY - 3);
+ test_spendable_output(&nodes[0], &revoked_local_txn[0]);
+ assert_eq!(sorted_vec(vec![Balance::CounterpartyRevokedOutputClaimable {
+ // to_self output to B
+ claimable_amount_satoshis: 10_000,
+ }, Balance::CounterpartyRevokedOutputClaimable { // HTLC 2
+ claimable_amount_satoshis: 1_000,
+ }, Balance::ClaimableAwaitingConfirmations {
+ claimable_amount_satoshis: as_htlc_claim_tx[0].output[0].value,
+ confirmation_height: nodes[0].best_block_info().1 + 2,
+ }]),
+ sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()));
+
+ connect_blocks(&nodes[0], 2);
+ test_spendable_output(&nodes[0], &as_htlc_claim_tx[0]);
+ assert_eq!(sorted_vec(vec![Balance::CounterpartyRevokedOutputClaimable {
+ // to_self output in B's revoked commitment
+ claimable_amount_satoshis: 10_000,
+ }, Balance::CounterpartyRevokedOutputClaimable { // HTLC 2
+ claimable_amount_satoshis: 1_000,
+ }]),
+ sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()));
+
+ connect_blocks(&nodes[0], revoked_htlc_timeout_txn[0].lock_time.0 - nodes[0].best_block_info().1);
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(&nodes[0],
+ [HTLCDestination::FailedPayment { payment_hash: failed_payment_hash }]);
+ // As time goes on, A may split its revocation claim transaction into multiple transactions.
+ let as_fewer_input_rbf = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+ for tx in as_fewer_input_rbf.iter() {
+ check_spends!(tx, revoked_local_txn[0]);
+ }
+
+ // Connect a number of additional blocks to ensure we don't forget the HTLC output needs
+ // claiming.
+ connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
+ let as_fewer_input_rbf = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+ for tx in as_fewer_input_rbf.iter() {
+ check_spends!(tx, revoked_local_txn[0]);
+ }
+
+ mine_transaction(&nodes[0], &revoked_htlc_timeout_txn[0]);
+ let as_second_htlc_claim_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+ assert_eq!(as_second_htlc_claim_tx.len(), 2);
+
+ check_spends!(as_second_htlc_claim_tx[0], revoked_htlc_timeout_txn[0]);
+ check_spends!(as_second_htlc_claim_tx[1], revoked_local_txn[0]);
+
+ // Connect blocks to finalize the HTLC resolution with the HTLC-Timeout transaction. In a
+ // previous iteration of the revoked balance handling this would result in us "forgetting" that
+ // the revoked HTLC output still needed to be claimed.
+ connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
+ assert_eq!(sorted_vec(vec![Balance::CounterpartyRevokedOutputClaimable {
+ // to_self output in B's revoked commitment
+ claimable_amount_satoshis: 10_000,
+ }, Balance::CounterpartyRevokedOutputClaimable { // HTLC 2
+ claimable_amount_satoshis: 1_000,
+ }]),
+ sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()));
+
+ mine_transaction(&nodes[0], &as_second_htlc_claim_tx[0]);
+ assert_eq!(sorted_vec(vec![Balance::CounterpartyRevokedOutputClaimable {
+ // to_self output in B's revoked commitment
+ claimable_amount_satoshis: 10_000,
+ }, Balance::ClaimableAwaitingConfirmations {
+ claimable_amount_satoshis: as_second_htlc_claim_tx[0].output[0].value,
+ confirmation_height: nodes[0].best_block_info().1 + ANTI_REORG_DELAY - 1,
+ }]),
+ sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()));
+
+ mine_transaction(&nodes[0], &as_second_htlc_claim_tx[1]);
+ assert_eq!(sorted_vec(vec![Balance::ClaimableAwaitingConfirmations {
+ // to_self output in B's revoked commitment
+ claimable_amount_satoshis: as_second_htlc_claim_tx[1].output[0].value,
+ confirmation_height: nodes[0].best_block_info().1 + ANTI_REORG_DELAY - 1,
+ }, Balance::ClaimableAwaitingConfirmations {
+ claimable_amount_satoshis: as_second_htlc_claim_tx[0].output[0].value,
+ confirmation_height: nodes[0].best_block_info().1 + ANTI_REORG_DELAY - 2,
+ }]),
+ sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()));
+
+ connect_blocks(&nodes[0], ANTI_REORG_DELAY - 2);
+ test_spendable_output(&nodes[0], &as_second_htlc_claim_tx[0]);
+ connect_blocks(&nodes[0], 1);
+ test_spendable_output(&nodes[0], &as_second_htlc_claim_tx[1]);
+
+ assert_eq!(nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances(), Vec::new());
+}
+
+#[test]
+fn test_revoked_counterparty_aggregated_claims() {
+ // Tests `get_claimable_balances` for revoked counterparty commitment transactions when
+ // claiming with an aggregated claim transaction.
+ let mut chanmon_cfgs = create_chanmon_cfgs(2);
+ // We broadcast a second-to-latest commitment transaction, without providing the revocation
+ // secret to the counterparty. However, because we always immediately take the revocation
+ // secret from the keys_manager, we would panic at broadcast as we're trying to sign a
+ // transaction which, from the point of view of our keys_manager, is revoked.
+ chanmon_cfgs[1].keys_manager.disable_revocation_policy_check = true;
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+ let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+ let (_, _, chan_id, funding_tx) =
+ create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 100_000_000, InitFeatures::known(), InitFeatures::known());
+ let funding_outpoint = OutPoint { txid: funding_tx.txid(), index: 0 };
+ assert_eq!(funding_outpoint.to_channel_id(), chan_id);
+
+ // We create two HTLCs, one for which we will give A the preimage so that it can generate an
+ // HTLC-Success transaction, and one which we will not, allowing B to claim the HTLC output in
+ // an aggregated revocation-claim transaction.
+
+ let (claimed_payment_preimage, claimed_payment_hash, ..) = route_payment(&nodes[1], &[&nodes[0]], 3_000_000);
+ let revoked_payment_hash = route_payment(&nodes[1], &[&nodes[0]], 4_000_000).1;
+
+ let htlc_cltv_timeout = nodes[1].best_block_info().1 + TEST_FINAL_CLTV + 1; // Note ChannelManager adds one to CLTV timeouts for safety
+
+ // Cheat by giving A's ChannelMonitor the preimage to the to-be-claimed HTLC so that we have an
+ // HTLC-claim transaction on the to-be-revoked state.
+ get_monitor!(nodes[0], chan_id).provide_payment_preimage(&claimed_payment_hash, &claimed_payment_preimage,
+ &node_cfgs[0].tx_broadcaster, &LowerBoundedFeeEstimator::new(node_cfgs[0].fee_estimator), &nodes[0].logger);
+
+ // Now get the latest commitment transaction from A and then update the fee to revoke it
+ let as_revoked_txn = get_local_commitment_txn!(nodes[0], chan_id);
+
+ assert_eq!(as_revoked_txn.len(), 2);
+ check_spends!(as_revoked_txn[0], funding_tx);
+ check_spends!(as_revoked_txn[1], as_revoked_txn[0]); // The HTLC-Claim transaction
+
+ let opt_anchors = get_opt_anchors!(nodes[0], chan_id);
+ let chan_feerate = get_feerate!(nodes[0], chan_id) as u64;
+
+ {
+ let mut feerate = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
+ *feerate += 1;
+ }
+ nodes[0].node.timer_tick_occurred();
+ check_added_monitors!(nodes[0], 1);
+
+ let fee_update = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+ nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), &fee_update.update_fee.unwrap());
+ commitment_signed_dance!(nodes[1], nodes[0], fee_update.commitment_signed, false);
+
+ nodes[0].node.claim_funds(claimed_payment_preimage);
+ expect_payment_claimed!(nodes[0], claimed_payment_hash, 3_000_000);
+ check_added_monitors!(nodes[0], 1);
+ let _a_htlc_msgs = get_htlc_update_msgs!(&nodes[0], nodes[1].node.get_our_node_id());
+
+ assert_eq!(sorted_vec(vec![Balance::ClaimableOnChannelClose {
+ claimable_amount_satoshis: 100_000 - 4_000 - 3_000,
+ }, Balance::MaybeTimeoutClaimableHTLC {
+ claimable_amount_satoshis: 4_000,
+ claimable_height: htlc_cltv_timeout,
+ }, Balance::MaybeTimeoutClaimableHTLC {
+ claimable_amount_satoshis: 3_000,
+ claimable_height: htlc_cltv_timeout,
+ }]),
+ sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()));
+
+ mine_transaction(&nodes[1], &as_revoked_txn[0]);
+ check_closed_broadcast!(nodes[1], true);
+ check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
+ check_added_monitors!(nodes[1], 1);
+
+ let mut claim_txn: Vec<_> = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().drain(..).filter(|tx| tx.input.iter().any(|inp| inp.previous_output.txid == as_revoked_txn[0].txid())).collect();
+ // Currently the revoked commitment outputs are all claimed in one aggregated transaction
+ assert_eq!(claim_txn.len(), 1);
+ assert_eq!(claim_txn[0].input.len(), 3);
+ check_spends!(claim_txn[0], as_revoked_txn[0]);
+
+ let to_remote_maturity = nodes[1].best_block_info().1 + ANTI_REORG_DELAY - 1;
+
+ assert_eq!(sorted_vec(vec![Balance::ClaimableAwaitingConfirmations {
+ // to_remote output in A's revoked commitment
+ claimable_amount_satoshis: 100_000 - 4_000 - 3_000,
+ confirmation_height: to_remote_maturity,
+ }, Balance::CounterpartyRevokedOutputClaimable {
+ // to_self output in A's revoked commitment
+ claimable_amount_satoshis: 1_000_000 - 100_000 - chan_feerate *
+ (channel::commitment_tx_base_weight(opt_anchors) + 2 * channel::COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000,
+ }, Balance::CounterpartyRevokedOutputClaimable { // HTLC 1
+ claimable_amount_satoshis: 4_000,
+ }, Balance::CounterpartyRevokedOutputClaimable { // HTLC 2
+ claimable_amount_satoshis: 3_000,
+ }]),
+ sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()));
+
+ // Confirm A's HTLC-Success transaction, which presumably raced B's claim, causing B to create a
+ // new claim.
+ mine_transaction(&nodes[1], &as_revoked_txn[1]);
+ expect_payment_sent!(nodes[1], claimed_payment_preimage);
+ let mut claim_txn_2: Vec<_> = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
+ claim_txn_2.sort_unstable_by_key(|tx| if tx.input.iter().any(|inp| inp.previous_output.txid == as_revoked_txn[0].txid()) { 0 } else { 1 });
+ // Once B sees the HTLC-Success transaction it splits its claim transaction into two, though in
+ // theory it could re-aggregate the claims as well.
+ assert_eq!(claim_txn_2.len(), 2);
+ assert_eq!(claim_txn_2[0].input.len(), 2);
+ check_spends!(claim_txn_2[0], as_revoked_txn[0]);
+ assert_eq!(claim_txn_2[1].input.len(), 1);
+ check_spends!(claim_txn_2[1], as_revoked_txn[1]);
+
+ assert_eq!(sorted_vec(vec![Balance::ClaimableAwaitingConfirmations {
+ // to_remote output in A's revoked commitment
+ claimable_amount_satoshis: 100_000 - 4_000 - 3_000,
+ confirmation_height: to_remote_maturity,
+ }, Balance::CounterpartyRevokedOutputClaimable {
+ // to_self output in A's revoked commitment
+ claimable_amount_satoshis: 1_000_000 - 100_000 - chan_feerate *
+ (channel::commitment_tx_base_weight(opt_anchors) + 2 * channel::COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000,
+ }, Balance::CounterpartyRevokedOutputClaimable { // HTLC 1
+ claimable_amount_satoshis: 4_000,
+ }, Balance::CounterpartyRevokedOutputClaimable { // HTLC 2
+ // The amount here is a bit of an overstatement - it has really been reduced by the HTLC
+ // transaction fee, but the claimable amount is always a bit of an overshoot for HTLCs
+ // anyway, so it's not a big change.
+ claimable_amount_satoshis: 3_000,
+ }]),
+ sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()));
+
+ connect_blocks(&nodes[1], 5);
+ test_spendable_output(&nodes[1], &as_revoked_txn[0]);
+
+ assert_eq!(sorted_vec(vec![Balance::CounterpartyRevokedOutputClaimable {
+ // to_self output in A's revoked commitment
+ claimable_amount_satoshis: 1_000_000 - 100_000 - chan_feerate *
+ (channel::commitment_tx_base_weight(opt_anchors) + 2 * channel::COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000,
+ }, Balance::CounterpartyRevokedOutputClaimable { // HTLC 1
+ claimable_amount_satoshis: 4_000,
+ }, Balance::CounterpartyRevokedOutputClaimable { // HTLC 2
+ // The amount here is a bit of an overstatement - it has really been reduced by the HTLC
+ // transaction fee, but the claimable amount is always a bit of an overshoot for HTLCs
+ // anyway, so it's not a big change.
+ claimable_amount_satoshis: 3_000,
+ }]),
+ sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()));
+
+ mine_transaction(&nodes[1], &claim_txn_2[1]);
+ let htlc_2_claim_maturity = nodes[1].best_block_info().1 + ANTI_REORG_DELAY - 1;
+
+ assert_eq!(sorted_vec(vec![Balance::CounterpartyRevokedOutputClaimable {
+ // to_self output in A's revoked commitment
+ claimable_amount_satoshis: 1_000_000 - 100_000 - chan_feerate *
+ (channel::commitment_tx_base_weight(opt_anchors) + 2 * channel::COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000,
+ }, Balance::CounterpartyRevokedOutputClaimable { // HTLC 1
+ claimable_amount_satoshis: 4_000,
+ }, Balance::ClaimableAwaitingConfirmations { // HTLC 2
+ claimable_amount_satoshis: claim_txn_2[1].output[0].value,
+ confirmation_height: htlc_2_claim_maturity,
+ }]),
+ sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()));
+
+ connect_blocks(&nodes[1], 5);
+ test_spendable_output(&nodes[1], &claim_txn_2[1]);
+
+ assert_eq!(sorted_vec(vec![Balance::CounterpartyRevokedOutputClaimable {
+ // to_self output in A's revoked commitment
+ claimable_amount_satoshis: 1_000_000 - 100_000 - chan_feerate *
+ (channel::commitment_tx_base_weight(opt_anchors) + 2 * channel::COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000,
+ }, Balance::CounterpartyRevokedOutputClaimable { // HTLC 1
+ claimable_amount_satoshis: 4_000,
+ }]),
+ sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()));
+
+ mine_transaction(&nodes[1], &claim_txn_2[0]);
+ let rest_claim_maturity = nodes[1].best_block_info().1 + ANTI_REORG_DELAY - 1;
+
+ assert_eq!(vec![Balance::ClaimableAwaitingConfirmations {
+ claimable_amount_satoshis: claim_txn_2[0].output[0].value,
+ confirmation_height: rest_claim_maturity,
+ }],
+ nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances());
+
+ assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); // We shouldn't fail the payment until we spend the output
+
+ connect_blocks(&nodes[1], 5);
+ expect_payment_failed!(nodes[1], revoked_payment_hash, true);
+ test_spendable_output(&nodes[1], &claim_txn_2[0]);
+ assert!(nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances().is_empty());
+}
use io::{self, Read};
use io_extras::read_to_end;
-use util::events::MessageSendEventsProvider;
+use util::events::{MessageSendEventsProvider, OnionMessageProvider};
use util::logger;
-use util::ser::{LengthReadable, Readable, ReadableArgs, Writeable, Writer, FixedLengthReader, HighZeroBytesDroppedVarInt, Hostname};
+use util::ser::{BigSize, LengthReadable, Readable, ReadableArgs, Writeable, Writer, FixedLengthReader, HighZeroBytesDroppedBigSize, Hostname};
use ln::{PaymentPreimage, PaymentHash, PaymentSecret};
/// Handle an incoming channel_update message, returning true if it should be forwarded on,
/// false if it should not be forwarded, or an Err if the message was invalid.
fn handle_channel_update(&self, msg: &ChannelUpdate) -> Result<bool, LightningError>;
- /// Gets a subset of the channel announcements and updates required to dump our routing table
- /// to a remote node, starting at the short_channel_id indicated by starting_point and
- /// including the batch_amount entries immediately higher in numerical value than starting_point.
- fn get_next_channel_announcements(&self, starting_point: u64, batch_amount: u8) -> Vec<(ChannelAnnouncement, Option<ChannelUpdate>, Option<ChannelUpdate>)>;
- /// Gets a subset of the node announcements required to dump our routing table to a remote node,
- /// starting at the node *after* the provided publickey and including batch_amount entries
- /// immediately higher (as defined by <PublicKey as Ord>::cmp) than starting_point.
+ /// Gets channel announcements and updates required to dump our routing table to a remote node,
+ /// starting at the short_channel_id indicated by starting_point and including announcements
+ /// for a single channel.
+ fn get_next_channel_announcement(&self, starting_point: u64) -> Option<(ChannelAnnouncement, Option<ChannelUpdate>, Option<ChannelUpdate>)>;
+ /// Gets a node announcement required to dump our routing table to a remote node, starting at
+ /// the node *after* the provided pubkey and including up to one announcement immediately
+ /// higher (as defined by <PublicKey as Ord>::cmp) than starting_point.
/// If None is provided for starting_point, we start at the first node.
- fn get_next_node_announcements(&self, starting_point: Option<&PublicKey>, batch_amount: u8) -> Vec<NodeAnnouncement>;
+ fn get_next_node_announcement(&self, starting_point: Option<&PublicKey>) -> Option<NodeAnnouncement>;
/// Called when a connection is established with a peer. This can be used to
/// perform routing table synchronization using a strategy defined by the
/// implementor.
fn handle_query_short_channel_ids(&self, their_node_id: &PublicKey, msg: QueryShortChannelIds) -> Result<(), LightningError>;
}
+/// A trait to describe an object that can receive onion messages.
+pub trait OnionMessageHandler : OnionMessageProvider {
+ /// Handle an incoming onion_message message from the given peer.
+ fn handle_onion_message(&self, peer_node_id: &PublicKey, msg: &OnionMessage);
+ /// Called when a connection is established with a peer. Can be used to track which peers
+ /// advertise onion message support and are online.
+ fn peer_connected(&self, their_node_id: &PublicKey, init: &Init);
+ /// Indicates a connection to the peer failed/an existing connection was lost. Allows handlers to
+ /// drop and refuse to forward onion messages to this peer.
+ fn peer_disconnected(&self, their_node_id: &PublicKey, no_connection_possible: bool);
+}
+
mod fuzzy_internal_msgs {
use prelude::*;
use ln::{PaymentPreimage, PaymentSecret};
impl Writeable for FinalOnionHopData {
fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
self.payment_secret.0.write(w)?;
- HighZeroBytesDroppedVarInt(self.total_msat).write(w)
+ HighZeroBytesDroppedBigSize(self.total_msat).write(w)
}
}
impl Readable for FinalOnionHopData {
fn read<R: Read>(r: &mut R) -> Result<Self, DecodeError> {
let secret: [u8; 32] = Readable::read(r)?;
- let amt: HighZeroBytesDroppedVarInt<u64> = Readable::read(r)?;
+ let amt: HighZeroBytesDroppedBigSize<u64> = Readable::read(r)?;
Ok(Self { payment_secret: PaymentSecret(secret), total_msat: amt.0 })
}
}
},
OnionHopDataFormat::NonFinalNode { short_channel_id } => {
encode_varint_length_prefixed_tlv!(w, {
- (2, HighZeroBytesDroppedVarInt(self.amt_to_forward), required),
- (4, HighZeroBytesDroppedVarInt(self.outgoing_cltv_value), required),
+ (2, HighZeroBytesDroppedBigSize(self.amt_to_forward), required),
+ (4, HighZeroBytesDroppedBigSize(self.outgoing_cltv_value), required),
(6, short_channel_id, required)
});
},
OnionHopDataFormat::FinalNode { ref payment_data, ref keysend_preimage } => {
encode_varint_length_prefixed_tlv!(w, {
- (2, HighZeroBytesDroppedVarInt(self.amt_to_forward), required),
- (4, HighZeroBytesDroppedVarInt(self.outgoing_cltv_value), required),
+ (2, HighZeroBytesDroppedBigSize(self.amt_to_forward), required),
+ (4, HighZeroBytesDroppedBigSize(self.outgoing_cltv_value), required),
(8, payment_data, option),
(5482373484, keysend_preimage, option)
});
}
}
-// ReadableArgs because we need onion_utils::decode_next_hop to accommodate payment packets and
-// onion message packets.
-impl ReadableArgs<()> for OnionHopData {
- fn read<R: Read>(r: &mut R, _arg: ()) -> Result<Self, DecodeError> {
- <Self as Readable>::read(r)
- }
-}
-
impl Readable for OnionHopData {
- fn read<R: Read>(mut r: &mut R) -> Result<Self, DecodeError> {
- use bitcoin::consensus::encode::{Decodable, Error, VarInt};
- let v: VarInt = Decodable::consensus_decode(&mut r)
- .map_err(|e| match e {
- Error::Io(ioe) => DecodeError::from(ioe),
- _ => DecodeError::InvalidValue
- })?;
+ fn read<R: Read>(r: &mut R) -> Result<Self, DecodeError> {
+ let b: BigSize = Readable::read(r)?;
const LEGACY_ONION_HOP_FLAG: u64 = 0;
- let (format, amt, cltv_value) = if v.0 != LEGACY_ONION_HOP_FLAG {
- let mut rd = FixedLengthReader::new(r, v.0);
- let mut amt = HighZeroBytesDroppedVarInt(0u64);
- let mut cltv_value = HighZeroBytesDroppedVarInt(0u32);
+ let (format, amt, cltv_value) = if b.0 != LEGACY_ONION_HOP_FLAG {
+ let mut rd = FixedLengthReader::new(r, b.0);
+ let mut amt = HighZeroBytesDroppedBigSize(0u64);
+ let mut cltv_value = HighZeroBytesDroppedBigSize(0u32);
let mut short_id: Option<u64> = None;
let mut payment_data: Option<FinalOnionHopData> = None;
let mut keysend_preimage: Option<PaymentPreimage> = None;
- // The TLV type is chosen to be compatible with lnd and c-lightning.
decode_tlv_stream!(&mut rd, {
(2, amt, required),
(4, cltv_value, required),
(6, short_id, option),
(8, payment_data, option),
+ // See https://github.com/lightning/blips/blob/master/blip-0003.md
(5482373484, keysend_preimage, option)
});
rd.eat_remaining().map_err(|_| DecodeError::ShortRead)?;
}
}
+// ReadableArgs because we need onion_utils::decode_next_hop to accommodate payment packets and
+// onion message packets.
+impl ReadableArgs<()> for OnionHopData {
+ fn read<R: Read>(r: &mut R, _arg: ()) -> Result<Self, DecodeError> {
+ <Self as Readable>::read(r)
+ }
+}
+
impl Writeable for Ping {
fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
self.ponglen.write(w)?;
use bitcoin::secp256k1::{PublicKey,SecretKey};
use bitcoin::secp256k1::{Secp256k1, Message};
- use io::Cursor;
+ use io::{self, Cursor};
use prelude::*;
use core::convert::TryFrom;
assert_eq!(gossip_timestamp_filter.first_timestamp, 1590000000);
assert_eq!(gossip_timestamp_filter.timestamp_range, 0xffff_ffff);
}
+
+ #[test]
+ fn decode_onion_hop_data_len_as_bigsize() {
+ // Tests that we can decode an onion payload that is >253 bytes.
+ // Previously, receiving a payload of this size could've caused us to fail to decode a valid
+ // payload, because we were decoding the length (a BigSize, big-endian) as a VarInt
+ // (little-endian).
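+ // For example, the length 1000 (0x3e8) encodes as the BigSize bytes 0xfd 0x03 0xe8, whereas
+ // its bitcoin VarInt encoding is 0xfd 0xe8 0x03.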
+
+ // Encode a test onion payload with a big custom TLV such that it's >253 bytes, forcing the
+ // payload length to be encoded over multiple bytes rather than a single u8.
+ let big_payload = encode_big_payload().unwrap();
+ let mut rd = Cursor::new(&big_payload[..]);
+ <msgs::OnionHopData as Readable>::read(&mut rd).unwrap();
+ }
+ // See the above test; this needs to be a separate method for use of the serialization macros.
+ fn encode_big_payload() -> Result<Vec<u8>, io::Error> {
+ use util::ser::HighZeroBytesDroppedBigSize;
+ let payload = msgs::OnionHopData {
+ format: OnionHopDataFormat::NonFinalNode {
+ short_channel_id: 0xdeadbeef1bad1dea,
+ },
+ amt_to_forward: 1000,
+ outgoing_cltv_value: 0xffffffff,
+ };
+ let mut encoded_payload = Vec::new();
+ let test_bytes = vec![42u8; 1000];
+ if let OnionHopDataFormat::NonFinalNode { short_channel_id } = payload.format {
+ encode_varint_length_prefixed_tlv!(&mut encoded_payload, {
+ (1, test_bytes, vec_type),
+ (2, HighZeroBytesDroppedBigSize(payload.amt_to_forward), required),
+ (4, HighZeroBytesDroppedBigSize(payload.outgoing_cltv_value), required),
+ (6, short_channel_id, required)
+ });
+ }
+ Ok(encoded_payload)
+ }
}
use bitcoin::hashes::hmac::{Hmac, HmacEngine};
use bitcoin::hashes::sha256::Hash as Sha256;
-use bitcoin::secp256k1::{SecretKey,PublicKey};
+use bitcoin::secp256k1::{SecretKey, PublicKey, Scalar};
use bitcoin::secp256k1::Secp256k1;
use bitcoin::secp256k1::ecdh::SharedSecret;
use bitcoin::secp256k1;
Hmac::from_engine(hmac).into_inner()
}
-pub(crate) fn next_hop_packet_pubkey<T: secp256k1::Signing + secp256k1::Verification>(secp_ctx: &Secp256k1<T>, mut packet_pubkey: PublicKey, packet_shared_secret: &[u8; 32]) -> Result<PublicKey, secp256k1::Error> {
+pub(crate) fn next_hop_packet_pubkey<T: secp256k1::Signing + secp256k1::Verification>(secp_ctx: &Secp256k1<T>, packet_pubkey: PublicKey, packet_shared_secret: &[u8; 32]) -> Result<PublicKey, secp256k1::Error> {
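+ // Per BOLT 4, the ephemeral pubkey is re-blinded at each hop by multiplying it by the scalar
+ // SHA256(packet_pubkey || shared_secret).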
let blinding_factor = {
let mut sha = Sha256::engine();
sha.input(&packet_pubkey.serialize()[..]);
Sha256::from_engine(sha).into_inner()
};
- packet_pubkey.mul_assign(secp_ctx, &blinding_factor[..]).map(|_| packet_pubkey)
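+ // A SHA256 output is only an invalid scalar if it is >= the curve order, which for a random
+ // hash happens with negligible probability, so the unwrap here is considered safe.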
+ packet_pubkey.mul_tweak(secp_ctx, &Scalar::from_be_bytes(blinding_factor).unwrap())
}
// can only fail if an intermediary hop has an invalid public key or session_priv is invalid
let ephemeral_pubkey = blinded_pub;
- blinded_priv.mul_assign(&blinding_factor)?;
+ blinded_priv = blinded_priv.mul_tweak(&Scalar::from_be_bytes(blinding_factor).unwrap())?;
blinded_pub = PublicKey::from_secret_key(secp_ctx, &blinded_priv);
callback(shared_secret, blinding_factor, ephemeral_pubkey, hop, idx);
use util::ser::{ReadableArgs, Writeable};
use io;
-use bitcoin::{Block, BlockHeader, BlockHash};
+use bitcoin::{Block, BlockHeader, BlockHash, TxMerkleNode};
+use bitcoin::hashes::Hash;
use bitcoin::network::constants::Network;
use prelude::*;
check_added_monitors!(nodes[1], 1);
expect_payment_claimed!(nodes[1], payment_hash, 10_000_000);
- let mut header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+ let mut header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
connect_block(&nodes[1], &Block { header, txdata: vec![node_txn[1].clone()]});
check_closed_broadcast!(nodes[1], true);
check_added_monitors!(nodes[1], 1);
use ln::features::InitFeatures;
use ln::msgs;
-use ln::msgs::{ChannelMessageHandler, LightningError, NetAddress, RoutingMessageHandler};
+use ln::msgs::{ChannelMessageHandler, LightningError, NetAddress, OnionMessageHandler, RoutingMessageHandler};
use ln::channelmanager::{SimpleArcChannelManager, SimpleRefChannelManager};
use util::ser::{VecWriter, Writeable, Writer};
use ln::peer_channel_encryptor::{PeerChannelEncryptor,NextNoiseStep};
use ln::wire::Encode;
use routing::gossip::{NetworkGraph, P2PGossipSync};
use util::atomic_counter::AtomicCounter;
-use util::events::{MessageSendEvent, MessageSendEventsProvider};
+use util::events::{MessageSendEvent, MessageSendEventsProvider, OnionMessageProvider};
use util::logger::Logger;
use prelude::*;
fn handle_node_announcement(&self, _msg: &msgs::NodeAnnouncement) -> Result<bool, LightningError> { Ok(false) }
fn handle_channel_announcement(&self, _msg: &msgs::ChannelAnnouncement) -> Result<bool, LightningError> { Ok(false) }
fn handle_channel_update(&self, _msg: &msgs::ChannelUpdate) -> Result<bool, LightningError> { Ok(false) }
- fn get_next_channel_announcements(&self, _starting_point: u64, _batch_amount: u8) ->
- Vec<(msgs::ChannelAnnouncement, Option<msgs::ChannelUpdate>, Option<msgs::ChannelUpdate>)> { Vec::new() }
- fn get_next_node_announcements(&self, _starting_point: Option<&PublicKey>, _batch_amount: u8) -> Vec<msgs::NodeAnnouncement> { Vec::new() }
+ fn get_next_channel_announcement(&self, _starting_point: u64) ->
+ Option<(msgs::ChannelAnnouncement, Option<msgs::ChannelUpdate>, Option<msgs::ChannelUpdate>)> { None }
+ fn get_next_node_announcement(&self, _starting_point: Option<&PublicKey>) -> Option<msgs::NodeAnnouncement> { None }
fn peer_connected(&self, _their_node_id: &PublicKey, _init: &msgs::Init) {}
fn handle_reply_channel_range(&self, _their_node_id: &PublicKey, _msg: msgs::ReplyChannelRange) -> Result<(), LightningError> { Ok(()) }
fn handle_reply_short_channel_ids_end(&self, _their_node_id: &PublicKey, _msg: msgs::ReplyShortChannelIdsEnd) -> Result<(), LightningError> { Ok(()) }
fn handle_query_channel_range(&self, _their_node_id: &PublicKey, _msg: msgs::QueryChannelRange) -> Result<(), LightningError> { Ok(()) }
fn handle_query_short_channel_ids(&self, _their_node_id: &PublicKey, _msg: msgs::QueryShortChannelIds) -> Result<(), LightningError> { Ok(()) }
}
+impl OnionMessageProvider for IgnoringMessageHandler {
+ fn next_onion_message_for_peer(&self, _peer_node_id: PublicKey) -> Option<msgs::OnionMessage> { None }
+}
+impl OnionMessageHandler for IgnoringMessageHandler {
+ fn handle_onion_message(&self, _their_node_id: &PublicKey, _msg: &msgs::OnionMessage) {}
+ fn peer_connected(&self, _their_node_id: &PublicKey, _init: &msgs::Init) {}
+ fn peer_disconnected(&self, _their_node_id: &PublicKey, _no_connection_possible: bool) {}
+}
impl Deref for IgnoringMessageHandler {
type Target = IgnoringMessageHandler;
fn deref(&self) -> &Self { self }
}
/// Provides references to trait impls which handle different types of messages.
-pub struct MessageHandler<CM: Deref, RM: Deref> where
+pub struct MessageHandler<CM: Deref, RM: Deref, OM: Deref> where
CM::Target: ChannelMessageHandler,
- RM::Target: RoutingMessageHandler {
+ RM::Target: RoutingMessageHandler,
+ OM::Target: OnionMessageHandler,
+{
/// A message handler which handles messages specific to channels. Usually this is just a
/// [`ChannelManager`] object or an [`ErroringMessageHandler`].
///
///
/// [`P2PGossipSync`]: crate::routing::gossip::P2PGossipSync
pub route_handler: RM,
+
+ /// A message handler which handles onion messages. For now, this can only be an
+ /// [`IgnoringMessageHandler`].
+ pub onion_message_handler: OM,
}
/// Provides an object which can be used to send data to and which uniquely identifies a connection
/// we have fewer than this many messages in the outbound buffer again.
/// We also use this as the target number of outbound gossip messages to keep in the write buffer,
/// refilled as we send bytes.
-const OUTBOUND_BUFFER_LIMIT_READ_PAUSE: usize = 10;
+const OUTBOUND_BUFFER_LIMIT_READ_PAUSE: usize = 12;
/// When the outbound buffer has this many messages, we'll simply skip relaying gossip messages to
/// the peer.
const OUTBOUND_BUFFER_LIMIT_DROP_GOSSIP: usize = OUTBOUND_BUFFER_LIMIT_READ_PAUSE * FORWARD_INIT_SYNC_BUFFER_LIMIT_RATIO;
/// tick. Once we have sent this many messages since the last ping, we send a ping right away to
/// ensure we don't just fill up our send buffer and leave the peer with too many messages to
/// process before the next ping.
+///
+/// Note that we continue responding to other messages even after we've sent this many messages, so
+/// it's more of a general guideline used for gossip backfill (and gossip forwarding, times
+/// [`FORWARD_INIT_SYNC_BUFFER_LIMIT_RATIO`]) than a hard limit.
const BUFFER_DRAIN_MSGS_PER_TICK: usize = 32;
struct Peer {
pending_outbound_buffer: LinkedList<Vec<u8>>,
pending_outbound_buffer_first_msg_offset: usize,
+ // Queue gossip broadcasts separately from `pending_outbound_buffer` so we can easily prioritize
+ // channel messages over them.
+ gossip_broadcast_buffer: LinkedList<Vec<u8>>,
awaiting_write_event: bool,
pending_read_buffer: Vec<u8>,
InitSyncTracker::NodesSyncing(pk) => pk < node_id,
}
}
+
+ /// Returns whether we should be reading bytes from this peer, based on whether its outbound
+ /// buffer still has space and we don't need to pause reads to get some writes out.
+ fn should_read(&self) -> bool {
+ self.pending_outbound_buffer.len() < OUTBOUND_BUFFER_LIMIT_READ_PAUSE
+ }
+
+ /// Determines if we should push additional gossip background sync (aka "backfill") onto a peer's
+ /// outbound buffer. This is checked every time the peer's buffer may have been drained.
+ fn should_buffer_gossip_backfill(&self) -> bool {
+ self.pending_outbound_buffer.is_empty() && self.gossip_broadcast_buffer.is_empty()
+ && self.msgs_sent_since_pong < BUFFER_DRAIN_MSGS_PER_TICK
+ }
+
+ /// Determines if we should push an onion message onto a peer's outbound buffer. This is checked
+ /// every time the peer's buffer may have been drained.
+ fn should_buffer_onion_message(&self) -> bool {
+ self.pending_outbound_buffer.is_empty()
+ && self.msgs_sent_since_pong < BUFFER_DRAIN_MSGS_PER_TICK
+ }
+
+ /// Determines if we should push additional gossip broadcast messages onto a peer's outbound
+ /// buffer. This is checked every time the peer's buffer may have been drained.
+ fn should_buffer_gossip_broadcast(&self) -> bool {
+ self.pending_outbound_buffer.is_empty()
+ && self.msgs_sent_since_pong < BUFFER_DRAIN_MSGS_PER_TICK
+ }
+
+ /// Returns whether this peer's outbound buffers are full and we should drop gossip broadcasts.
+ fn buffer_full_drop_gossip_broadcast(&self) -> bool {
+ let total_outbound_buffered =
+ self.gossip_broadcast_buffer.len() + self.pending_outbound_buffer.len();
+
+ total_outbound_buffered > OUTBOUND_BUFFER_LIMIT_DROP_GOSSIP ||
+ self.msgs_sent_since_pong > BUFFER_DRAIN_MSGS_PER_TICK * FORWARD_INIT_SYNC_BUFFER_LIMIT_RATIO
+ }
}
/// SimpleArcPeerManager is useful when you need a PeerManager with a static lifetime, e.g.
/// issues such as overly long function definitions.
///
/// (C-not exported) as Arcs don't make sense in bindings
-pub type SimpleArcPeerManager<SD, M, T, F, C, L> = PeerManager<SD, Arc<SimpleArcChannelManager<M, T, F, L>>, Arc<P2PGossipSync<Arc<NetworkGraph<Arc<L>>>, Arc<C>, Arc<L>>>, Arc<L>, Arc<IgnoringMessageHandler>>;
+pub type SimpleArcPeerManager<SD, M, T, F, C, L> = PeerManager<SD, Arc<SimpleArcChannelManager<M, T, F, L>>, Arc<P2PGossipSync<Arc<NetworkGraph<Arc<L>>>, Arc<C>, Arc<L>>>, IgnoringMessageHandler, Arc<L>, Arc<IgnoringMessageHandler>>;
/// SimpleRefPeerManager is a type alias for a PeerManager reference, and is the reference
/// counterpart to the SimpleArcPeerManager type alias. Use this type by default when you don't
/// helps with issues such as long function definitions.
///
/// (C-not exported) as Arcs don't make sense in bindings
-pub type SimpleRefPeerManager<'a, 'b, 'c, 'd, 'e, 'f, 'g, 'h, SD, M, T, F, C, L> = PeerManager<SD, SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, M, T, F, L>, &'e P2PGossipSync<&'g NetworkGraph<&'f L>, &'h C, &'f L>, &'f L, IgnoringMessageHandler>;
+pub type SimpleRefPeerManager<'a, 'b, 'c, 'd, 'e, 'f, 'g, 'h, SD, M, T, F, C, L> = PeerManager<SD, SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, M, T, F, L>, &'e P2PGossipSync<&'g NetworkGraph<&'f L>, &'h C, &'f L>, IgnoringMessageHandler, &'f L, IgnoringMessageHandler>;
/// A PeerManager manages a set of peers, described by their [`SocketDescriptor`] and marshalls
/// socket events into messages which it passes on to its [`MessageHandler`].
/// you're using lightning-net-tokio.
///
/// [`read_event`]: PeerManager::read_event
-pub struct PeerManager<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, L: Deref, CMH: Deref> where
+pub struct PeerManager<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CMH: Deref> where
CM::Target: ChannelMessageHandler,
RM::Target: RoutingMessageHandler,
+ OM::Target: OnionMessageHandler,
L::Target: Logger,
CMH::Target: CustomMessageHandler {
- message_handler: MessageHandler<CM, RM>,
+ message_handler: MessageHandler<CM, RM, OM>,
/// Connection state for each connected peer - we have an outer read-write lock which is taken
/// as read while we're doing processing for a peer and taken write when a peer is being added
/// or removed.
}}
}
-impl<Descriptor: SocketDescriptor, CM: Deref, L: Deref> PeerManager<Descriptor, CM, IgnoringMessageHandler, L, IgnoringMessageHandler> where
+impl<Descriptor: SocketDescriptor, CM: Deref, OM: Deref, L: Deref> PeerManager<Descriptor, CM, IgnoringMessageHandler, OM, L, IgnoringMessageHandler> where
CM::Target: ChannelMessageHandler,
+ OM::Target: OnionMessageHandler,
L::Target: Logger {
- /// Constructs a new PeerManager with the given ChannelMessageHandler. No routing message
- /// handler is used and network graph messages are ignored.
+ /// Constructs a new `PeerManager` with the given `ChannelMessageHandler` and
+ /// `OnionMessageHandler`. No routing message handler is used and network graph messages are
+ /// ignored.
///
/// ephemeral_random_data is used to derive per-connection ephemeral keys and must be
/// cryptographically secure random bytes.
///
/// (C-not exported) as we can't export a PeerManager with a dummy route handler
- pub fn new_channel_only(channel_message_handler: CM, our_node_secret: SecretKey, ephemeral_random_data: &[u8; 32], logger: L) -> Self {
+ pub fn new_channel_only(channel_message_handler: CM, onion_message_handler: OM, our_node_secret: SecretKey, ephemeral_random_data: &[u8; 32], logger: L) -> Self {
Self::new(MessageHandler {
chan_handler: channel_message_handler,
route_handler: IgnoringMessageHandler{},
+ onion_message_handler,
}, our_node_secret, ephemeral_random_data, logger, IgnoringMessageHandler{})
}
}
-impl<Descriptor: SocketDescriptor, RM: Deref, L: Deref> PeerManager<Descriptor, ErroringMessageHandler, RM, L, IgnoringMessageHandler> where
+impl<Descriptor: SocketDescriptor, RM: Deref, L: Deref> PeerManager<Descriptor, ErroringMessageHandler, RM, IgnoringMessageHandler, L, IgnoringMessageHandler> where
RM::Target: RoutingMessageHandler,
L::Target: Logger {
- /// Constructs a new PeerManager with the given RoutingMessageHandler. No channel message
- /// handler is used and messages related to channels will be ignored (or generate error
- /// messages). Note that some other lightning implementations time-out connections after some
- /// time if no channel is built with the peer.
+ /// Constructs a new `PeerManager` with the given `RoutingMessageHandler`. No channel message
+ /// handler or onion message handler is used and onion and channel messages will be ignored (or
+ /// generate error messages). Note that some other lightning implementations time-out connections
+ /// after some time if no channel is built with the peer.
///
/// ephemeral_random_data is used to derive per-connection ephemeral keys and must be
/// cryptographically secure random bytes.
Self::new(MessageHandler {
chan_handler: ErroringMessageHandler::new(),
route_handler: routing_message_handler,
+ onion_message_handler: IgnoringMessageHandler{},
}, our_node_secret, ephemeral_random_data, logger, IgnoringMessageHandler{})
}
}
}
}
-impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, L: Deref, CMH: Deref> PeerManager<Descriptor, CM, RM, L, CMH> where
+impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CMH: Deref> PeerManager<Descriptor, CM, RM, OM, L, CMH> where
CM::Target: ChannelMessageHandler,
RM::Target: RoutingMessageHandler,
+ OM::Target: OnionMessageHandler,
L::Target: Logger,
CMH::Target: CustomMessageHandler {
/// Constructs a new PeerManager with the given message handlers and node_id secret key
/// ephemeral_random_data is used to derive per-connection ephemeral keys and must be
/// cryptographically secure random bytes.
- pub fn new(message_handler: MessageHandler<CM, RM>, our_node_secret: SecretKey, ephemeral_random_data: &[u8; 32], logger: L, custom_message_handler: CMH) -> Self {
+ pub fn new(message_handler: MessageHandler<CM, RM, OM>, our_node_secret: SecretKey, ephemeral_random_data: &[u8; 32], logger: L, custom_message_handler: CMH) -> Self {
let mut ephemeral_key_midstate = Sha256::engine();
ephemeral_key_midstate.input(ephemeral_random_data);
pending_outbound_buffer: LinkedList::new(),
pending_outbound_buffer_first_msg_offset: 0,
+ gossip_broadcast_buffer: LinkedList::new(),
awaiting_write_event: false,
pending_read_buffer,
pending_outbound_buffer: LinkedList::new(),
pending_outbound_buffer_first_msg_offset: 0,
+ gossip_broadcast_buffer: LinkedList::new(),
awaiting_write_event: false,
pending_read_buffer,
fn do_attempt_write_data(&self, descriptor: &mut Descriptor, peer: &mut Peer) {
while !peer.awaiting_write_event {
- if peer.pending_outbound_buffer.len() < OUTBOUND_BUFFER_LIMIT_READ_PAUSE && peer.msgs_sent_since_pong < BUFFER_DRAIN_MSGS_PER_TICK {
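+ // Fill the outbound buffer in priority order: any pending onion message first, then a queued
+ // gossip broadcast, and finally gossip backfill from the initial sync, before draining the
+ // buffer itself below.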
+ if peer.should_buffer_onion_message() {
+ if let Some(peer_node_id) = peer.their_node_id {
+ if let Some(next_onion_message) =
+ self.message_handler.onion_message_handler.next_onion_message_for_peer(peer_node_id) {
+ self.enqueue_message(peer, &next_onion_message);
+ }
+ }
+ }
+ if peer.should_buffer_gossip_broadcast() {
+ if let Some(msg) = peer.gossip_broadcast_buffer.pop_front() {
+ peer.pending_outbound_buffer.push_back(msg);
+ }
+ }
+ if peer.should_buffer_gossip_backfill() {
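+ // Backfill walks the channel announcements in SCID order until they are exhausted, then the
+ // node announcements in pubkey order, tracking progress in `peer.sync_status`.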
match peer.sync_status {
InitSyncTracker::NoSyncRequested => {},
InitSyncTracker::ChannelsSyncing(c) if c < 0xffff_ffff_ffff_ffff => {
- let steps = ((OUTBOUND_BUFFER_LIMIT_READ_PAUSE - peer.pending_outbound_buffer.len() + 2) / 3) as u8;
- let all_messages = self.message_handler.route_handler.get_next_channel_announcements(c, steps);
- for &(ref announce, ref update_a_option, ref update_b_option) in all_messages.iter() {
- self.enqueue_message(peer, announce);
- if let &Some(ref update_a) = update_a_option {
- self.enqueue_message(peer, update_a);
+ if let Some((announce, update_a_option, update_b_option)) =
+ self.message_handler.route_handler.get_next_channel_announcement(c)
+ {
+ self.enqueue_message(peer, &announce);
+ if let Some(update_a) = update_a_option {
+ self.enqueue_message(peer, &update_a);
}
- if let &Some(ref update_b) = update_b_option {
- self.enqueue_message(peer, update_b);
+ if let Some(update_b) = update_b_option {
+ self.enqueue_message(peer, &update_b);
}
peer.sync_status = InitSyncTracker::ChannelsSyncing(announce.contents.short_channel_id + 1);
- }
- if all_messages.is_empty() || all_messages.len() != steps as usize {
+ } else {
peer.sync_status = InitSyncTracker::ChannelsSyncing(0xffff_ffff_ffff_ffff);
}
},
InitSyncTracker::ChannelsSyncing(c) if c == 0xffff_ffff_ffff_ffff => {
- let steps = (OUTBOUND_BUFFER_LIMIT_READ_PAUSE - peer.pending_outbound_buffer.len()) as u8;
- let all_messages = self.message_handler.route_handler.get_next_node_announcements(None, steps);
- for msg in all_messages.iter() {
- self.enqueue_message(peer, msg);
+ if let Some(msg) = self.message_handler.route_handler.get_next_node_announcement(None) {
+ self.enqueue_message(peer, &msg);
peer.sync_status = InitSyncTracker::NodesSyncing(msg.contents.node_id);
- }
- if all_messages.is_empty() || all_messages.len() != steps as usize {
+ } else {
peer.sync_status = InitSyncTracker::NoSyncRequested;
}
},
InitSyncTracker::ChannelsSyncing(_) => unreachable!(),
InitSyncTracker::NodesSyncing(key) => {
- let steps = (OUTBOUND_BUFFER_LIMIT_READ_PAUSE - peer.pending_outbound_buffer.len()) as u8;
- let all_messages = self.message_handler.route_handler.get_next_node_announcements(Some(&key), steps);
- for msg in all_messages.iter() {
- self.enqueue_message(peer, msg);
+ if let Some(msg) = self.message_handler.route_handler.get_next_node_announcement(Some(&key)) {
+ self.enqueue_message(peer, &msg);
peer.sync_status = InitSyncTracker::NodesSyncing(msg.contents.node_id);
- }
- if all_messages.is_empty() || all_messages.len() != steps as usize {
+ } else {
peer.sync_status = InitSyncTracker::NoSyncRequested;
}
},
self.maybe_send_extra_ping(peer);
}
- if {
- let next_buff = match peer.pending_outbound_buffer.front() {
- None => return,
- Some(buff) => buff,
- };
+ let next_buff = match peer.pending_outbound_buffer.front() {
+ None => return,
+ Some(buff) => buff,
+ };
- let should_be_reading = peer.pending_outbound_buffer.len() < OUTBOUND_BUFFER_LIMIT_READ_PAUSE;
- let pending = &next_buff[peer.pending_outbound_buffer_first_msg_offset..];
- let data_sent = descriptor.send_data(pending, should_be_reading);
- peer.pending_outbound_buffer_first_msg_offset += data_sent;
- if peer.pending_outbound_buffer_first_msg_offset == next_buff.len() { true } else { false }
- } {
+ let pending = &next_buff[peer.pending_outbound_buffer_first_msg_offset..];
+ let data_sent = descriptor.send_data(pending, peer.should_read());
+ peer.pending_outbound_buffer_first_msg_offset += data_sent;
+ if peer.pending_outbound_buffer_first_msg_offset == next_buff.len() {
peer.pending_outbound_buffer_first_msg_offset = 0;
peer.pending_outbound_buffer.pop_front();
} else {
}
}
- /// Append a message to a peer's pending outbound/write buffer
- fn enqueue_encoded_message(&self, peer: &mut Peer, encoded_message: &Vec<u8>) {
- peer.msgs_sent_since_pong += 1;
- peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(&encoded_message[..]));
- }
-
/// Append a message to a peer's pending outbound/write buffer
fn enqueue_message<M: wire::Type>(&self, peer: &mut Peer, message: &M) {
let mut buffer = VecWriter(Vec::with_capacity(2048));
} else {
log_trace!(self.logger, "Enqueueing message {:?} to {}", message, log_pubkey!(peer.their_node_id.unwrap()))
}
- self.enqueue_encoded_message(peer, &buffer.0);
+ peer.msgs_sent_since_pong += 1;
+ peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(&buffer.0[..]));
+ }
+
+ /// Append a message to a peer's pending outbound/write gossip broadcast buffer
+ fn enqueue_encoded_gossip_broadcast(&self, peer: &mut Peer, encoded_message: &Vec<u8>) {
+ peer.msgs_sent_since_pong += 1;
+ peer.gossip_broadcast_buffer.push_back(peer.channel_encryptor.encrypt_message(&encoded_message[..]));
}
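The `peer.should_read()` and `peer.buffer_full_drop_gossip_broadcast()` helpers used in this file replace the inline comparisons removed by this diff. A plausible sketch, reconstructed from those removed expressions (the actual upstream helpers may differ slightly):

    impl Peer {
        // Keep reading from the socket only while the write buffer has room.
        fn should_read(&self) -> bool {
            self.pending_outbound_buffer.len() < OUTBOUND_BUFFER_LIMIT_READ_PAUSE
        }

        // Skip gossip broadcasts to a peer that is far behind on writes or to
        // which we have already sent many messages since its last pong.
        fn buffer_full_drop_gossip_broadcast(&self) -> bool {
            self.pending_outbound_buffer.len() > OUTBOUND_BUFFER_LIMIT_DROP_GOSSIP
                || self.msgs_sent_since_pong > BUFFER_DRAIN_MSGS_PER_TICK * FORWARD_INIT_SYNC_BUFFER_LIMIT_RATIO
        }
    }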
fn do_read_event(&self, peer_descriptor: &mut Descriptor, data: &[u8]) -> Result<bool, PeerHandleError> {
}
}
}
- pause_read = peer.pending_outbound_buffer.len() > OUTBOUND_BUFFER_LIMIT_READ_PAUSE;
+ pause_read = !peer.should_read();
if let Some(message) = msg_to_handle {
match self.handle_message(&peer_mutex, peer_lock, message) {
}
self.message_handler.route_handler.peer_connected(&their_node_id, &msg);
-
self.message_handler.chan_handler.peer_connected(&their_node_id, &msg);
+ self.message_handler.onion_message_handler.peer_connected(&their_node_id, &msg);
+
peer_lock.their_features = Some(msg.features);
return Ok(None);
} else if peer_lock.their_features.is_none() {
self.message_handler.route_handler.handle_reply_channel_range(&their_node_id, msg)?;
},
+ // Onion message:
+ wire::Message::OnionMessage(msg) => {
+ self.message_handler.onion_message_handler.handle_onion_message(&their_node_id, &msg);
+ },
+
// Unknown messages:
wire::Message::Unknown(type_id) if message.is_even() => {
log_debug!(self.logger, "Received unknown even message of type {}, disconnecting peer!", type_id);
!peer.should_forward_channel_announcement(msg.contents.short_channel_id) {
continue
}
- if peer.pending_outbound_buffer.len() > OUTBOUND_BUFFER_LIMIT_DROP_GOSSIP
- || peer.msgs_sent_since_pong > BUFFER_DRAIN_MSGS_PER_TICK * FORWARD_INIT_SYNC_BUFFER_LIMIT_RATIO
- {
+ if peer.buffer_full_drop_gossip_broadcast() {
log_gossip!(self.logger, "Skipping broadcast message to {:?} as its outbound buffer is full", peer.their_node_id);
continue;
}
if except_node.is_some() && peer.their_node_id.as_ref() == except_node {
continue;
}
- self.enqueue_encoded_message(&mut *peer, &encoded_msg);
+ self.enqueue_encoded_gossip_broadcast(&mut *peer, &encoded_msg);
}
},
wire::Message::NodeAnnouncement(ref msg) => {
!peer.should_forward_node_announcement(msg.contents.node_id) {
continue
}
- if peer.pending_outbound_buffer.len() > OUTBOUND_BUFFER_LIMIT_DROP_GOSSIP
- || peer.msgs_sent_since_pong > BUFFER_DRAIN_MSGS_PER_TICK * FORWARD_INIT_SYNC_BUFFER_LIMIT_RATIO
- {
+ if peer.buffer_full_drop_gossip_broadcast() {
log_gossip!(self.logger, "Skipping broadcast message to {:?} as its outbound buffer is full", peer.their_node_id);
continue;
}
if except_node.is_some() && peer.their_node_id.as_ref() == except_node {
continue;
}
- self.enqueue_encoded_message(&mut *peer, &encoded_msg);
+ self.enqueue_encoded_gossip_broadcast(&mut *peer, &encoded_msg);
}
},
wire::Message::ChannelUpdate(ref msg) => {
!peer.should_forward_channel_announcement(msg.contents.short_channel_id) {
continue
}
- if peer.pending_outbound_buffer.len() > OUTBOUND_BUFFER_LIMIT_DROP_GOSSIP
- || peer.msgs_sent_since_pong > BUFFER_DRAIN_MSGS_PER_TICK * FORWARD_INIT_SYNC_BUFFER_LIMIT_RATIO
- {
+ if peer.buffer_full_drop_gossip_broadcast() {
log_gossip!(self.logger, "Skipping broadcast message to {:?} as its outbound buffer is full", peer.their_node_id);
continue;
}
if except_node.is_some() && peer.their_node_id.as_ref() == except_node {
continue;
}
- self.enqueue_encoded_message(&mut *peer, &encoded_msg);
+ self.enqueue_encoded_gossip_broadcast(&mut *peer, &encoded_msg);
}
},
_ => debug_assert!(false, "We shouldn't attempt to forward anything but gossip messages"),
}
descriptor.disconnect_socket();
self.message_handler.chan_handler.peer_disconnected(&node_id, false);
+ self.message_handler.onion_message_handler.peer_disconnected(&node_id, false);
}
}
}
log_pubkey!(node_id), if no_connection_possible { "no " } else { "" });
self.node_id_to_descriptor.lock().unwrap().remove(&node_id);
self.message_handler.chan_handler.peer_disconnected(&node_id, no_connection_possible);
+ self.message_handler.onion_message_handler.peer_disconnected(&node_id, no_connection_possible);
}
}
};
log_trace!(self.logger, "Disconnecting peer with id {} due to client request", node_id);
peers_lock.remove(&descriptor);
self.message_handler.chan_handler.peer_disconnected(&node_id, no_connection_possible);
+ self.message_handler.onion_message_handler.peer_disconnected(&node_id, no_connection_possible);
descriptor.disconnect_socket();
}
}
if let Some(node_id) = peer.lock().unwrap().their_node_id {
log_trace!(self.logger, "Disconnecting peer with id {} due to client request to disconnect all peers", node_id);
self.message_handler.chan_handler.peer_disconnected(&node_id, false);
+ self.message_handler.onion_message_handler.peer_disconnected(&node_id, false);
}
descriptor.disconnect_socket();
}
log_trace!(self.logger, "Disconnecting peer with id {} due to ping timeout", node_id);
self.node_id_to_descriptor.lock().unwrap().remove(&node_id);
self.message_handler.chan_handler.peer_disconnected(&node_id, false);
+ self.message_handler.onion_message_handler.peer_disconnected(&node_id, false);
}
}
}
cfgs
}
- fn create_network<'a>(peer_count: usize, cfgs: &'a Vec<PeerManagerCfg>) -> Vec<PeerManager<FileDescriptor, &'a test_utils::TestChannelMessageHandler, &'a test_utils::TestRoutingMessageHandler, &'a test_utils::TestLogger, IgnoringMessageHandler>> {
+ fn create_network<'a>(peer_count: usize, cfgs: &'a Vec<PeerManagerCfg>) -> Vec<PeerManager<FileDescriptor, &'a test_utils::TestChannelMessageHandler, &'a test_utils::TestRoutingMessageHandler, IgnoringMessageHandler, &'a test_utils::TestLogger, IgnoringMessageHandler>> {
let mut peers = Vec::new();
for i in 0..peer_count {
let node_secret = SecretKey::from_slice(&[42 + i as u8; 32]).unwrap();
let ephemeral_bytes = [i as u8; 32];
- let msg_handler = MessageHandler { chan_handler: &cfgs[i].chan_handler, route_handler: &cfgs[i].routing_handler };
+ let msg_handler = MessageHandler { chan_handler: &cfgs[i].chan_handler, route_handler: &cfgs[i].routing_handler, onion_message_handler: IgnoringMessageHandler {} };
let peer = PeerManager::new(msg_handler, node_secret, &ephemeral_bytes, &cfgs[i].logger, IgnoringMessageHandler {});
peers.push(peer);
}
peers
}
- fn establish_connection<'a>(peer_a: &PeerManager<FileDescriptor, &'a test_utils::TestChannelMessageHandler, &'a test_utils::TestRoutingMessageHandler, &'a test_utils::TestLogger, IgnoringMessageHandler>, peer_b: &PeerManager<FileDescriptor, &'a test_utils::TestChannelMessageHandler, &'a test_utils::TestRoutingMessageHandler, &'a test_utils::TestLogger, IgnoringMessageHandler>) -> (FileDescriptor, FileDescriptor) {
+ fn establish_connection<'a>(peer_a: &PeerManager<FileDescriptor, &'a test_utils::TestChannelMessageHandler, &'a test_utils::TestRoutingMessageHandler, IgnoringMessageHandler, &'a test_utils::TestLogger, IgnoringMessageHandler>, peer_b: &PeerManager<FileDescriptor, &'a test_utils::TestChannelMessageHandler, &'a test_utils::TestRoutingMessageHandler, IgnoringMessageHandler, &'a test_utils::TestLogger, IgnoringMessageHandler>) -> (FileDescriptor, FileDescriptor) {
let secp_ctx = Secp256k1::new();
let a_id = PublicKey::from_secret_key(&secp_ctx, &peer_a.our_node_secret);
let mut fd_a = FileDescriptor { fd: 1, outbound_data: Arc::new(Mutex::new(Vec::new())) };
// Check that each peer has received the expected number of channel updates and channel
// announcements.
- assert_eq!(cfgs[0].routing_handler.chan_upds_recvd.load(Ordering::Acquire), 100);
- assert_eq!(cfgs[0].routing_handler.chan_anns_recvd.load(Ordering::Acquire), 50);
- assert_eq!(cfgs[1].routing_handler.chan_upds_recvd.load(Ordering::Acquire), 100);
- assert_eq!(cfgs[1].routing_handler.chan_anns_recvd.load(Ordering::Acquire), 50);
+ assert_eq!(cfgs[0].routing_handler.chan_upds_recvd.load(Ordering::Acquire), 108);
+ assert_eq!(cfgs[0].routing_handler.chan_anns_recvd.load(Ordering::Acquire), 54);
+ assert_eq!(cfgs[1].routing_handler.chan_upds_recvd.load(Ordering::Acquire), 108);
+ assert_eq!(cfgs[1].routing_handler.chan_anns_recvd.load(Ordering::Acquire), 54);
}
#[test]
use prelude::*;
use core::mem;
+use bitcoin::hashes::Hash;
+use bitcoin::TxMerkleNode;
use ln::functional_test_utils::*;
check_added_monitors!(nodes[2], 1);
get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
- let mut header = BlockHeader { version: 0x2000_0000, prev_blockhash: nodes[2].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+ let mut header = BlockHeader { version: 0x2000_0000, prev_blockhash: nodes[2].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
let claim_txn = if local_commitment {
// Broadcast node 1 commitment txn to broadcast the HTLC-Timeout
let node_1_commitment_txn = get_local_commitment_txn!(nodes[1], chan_2.2);
assert_eq!(nodes[1].node.get_and_clear_pending_events().len(), 0);
if claim {
- disconnect_blocks(&nodes[1], ANTI_REORG_DELAY - 2);
+ // Disconnect Node 1's HTLC-Timeout which was connected above
+ disconnect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
let block = Block {
- header: BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 },
+ header: BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 },
txdata: claim_txn,
};
connect_block(&nodes[1], &block);
} else {
// Confirm the timeout tx and check that we fail the HTLC backwards
let block = Block {
- header: BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 },
+ header: BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 },
txdata: vec![],
};
connect_block(&nodes[1], &block);
if script.is_p2pkh() || script.is_p2sh() || script.is_v0_p2wpkh() || script.is_v0_p2wsh() {
true
} else if features.supports_shutdown_anysegwit() {
- script.is_witness_program() && script.as_bytes()[0] != SEGWIT_V0.into_u8()
+ script.is_witness_program() && script.as_bytes()[0] != SEGWIT_V0.to_u8()
} else {
false
}
//! Wire encoding/decoding for Lightning messages according to [BOLT #1], and for
//! custom message through the [`CustomMessageReader`] trait.
-//!
+//!
//! [BOLT #1]: https://github.com/lightning/bolts/blob/master/01-messaging.md
use io;
ChannelReady(msgs::ChannelReady),
Shutdown(msgs::Shutdown),
ClosingSigned(msgs::ClosingSigned),
+ OnionMessage(msgs::OnionMessage),
UpdateAddHTLC(msgs::UpdateAddHTLC),
UpdateFulfillHTLC(msgs::UpdateFulfillHTLC),
UpdateFailHTLC(msgs::UpdateFailHTLC),
&Message::ChannelReady(ref msg) => msg.type_id(),
&Message::Shutdown(ref msg) => msg.type_id(),
&Message::ClosingSigned(ref msg) => msg.type_id(),
+ &Message::OnionMessage(ref msg) => msg.type_id(),
&Message::UpdateAddHTLC(ref msg) => msg.type_id(),
&Message::UpdateFulfillHTLC(ref msg) => msg.type_id(),
&Message::UpdateFailHTLC(ref msg) => msg.type_id(),
msgs::ClosingSigned::TYPE => {
Ok(Message::ClosingSigned(Readable::read(buffer)?))
},
+ msgs::OnionMessage::TYPE => {
+ Ok(Message::OnionMessage(Readable::read(buffer)?))
+ },
msgs::UpdateAddHTLC::TYPE => {
Ok(Message::UpdateAddHTLC(Readable::read(buffer)?))
},
const TYPE: u16 = 39;
}
+impl Encode for msgs::OnionMessage {
+ const TYPE: u16 = 513;
+}
+
impl Encode for msgs::UpdateAddHTLC {
const TYPE: u16 = 128;
}
use chain::keysinterface::{KeysInterface, Sign};
use super::utils;
+use ln::msgs::DecodeError;
use util::chacha20poly1305rfc::ChaChaPolyWriteAdapter;
-use util::ser::{VecWriter, Writeable, Writer};
+use util::ser::{Readable, VecWriter, Writeable, Writer};
-use core::iter::FromIterator;
use io;
use prelude::*;
writer.0
}
+impl Writeable for BlindedRoute {
+ fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
+ self.introduction_node_id.write(w)?;
+ self.blinding_point.write(w)?;
+ (self.blinded_hops.len() as u8).write(w)?;
+ for hop in &self.blinded_hops {
+ hop.write(w)?;
+ }
+ Ok(())
+ }
+}
+
+impl Readable for BlindedRoute {
+ fn read<R: io::Read>(r: &mut R) -> Result<Self, DecodeError> {
+ let introduction_node_id = Readable::read(r)?;
+ let blinding_point = Readable::read(r)?;
+ let num_hops: u8 = Readable::read(r)?;
+ if num_hops == 0 { return Err(DecodeError::InvalidValue) }
+ let mut blinded_hops: Vec<BlindedHop> = Vec::with_capacity(num_hops.into());
+ for _ in 0..num_hops {
+ blinded_hops.push(Readable::read(r)?);
+ }
+ Ok(BlindedRoute {
+ introduction_node_id,
+ blinding_point,
+ blinded_hops,
+ })
+ }
+}
+
+impl_writeable!(BlindedHop, {
+ blinded_node_id,
+ encrypted_payload
+});
+
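The encoding implemented above is a plain concatenation:

    introduction_node_id (33 bytes) || blinding_point (33 bytes) || num_hops (1 byte)
    || num_hops * (blinded_node_id || length-prefixed encrypted_payload)

Rejecting `num_hops == 0` on read means a deserialized `BlindedRoute` can never be empty, complementing the sender-side `TooFewBlindedHops` check.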
/// TLVs to encode in an intermediate onion message packet's hop data. When provided in a blinded
/// route, they are encoded into [`BlindedHop::encrypted_payload`].
pub(crate) struct ForwardTlvs {
//! Onion message testing and test utilities live here.
use chain::keysinterface::{KeysInterface, Recipient};
+use ln::features::InitFeatures;
+use ln::msgs::{self, OnionMessageHandler};
use super::{BlindedRoute, Destination, OnionMessenger, SendError};
use util::enforcing_trait_impls::EnforcingSigner;
use util::test_utils;
use bitcoin::network::constants::Network;
-use bitcoin::secp256k1::{PublicKey, Secp256k1, SecretKey};
+use bitcoin::secp256k1::{PublicKey, Secp256k1};
use sync::Arc;
}
fn create_nodes(num_messengers: u8) -> Vec<MessengerNode> {
- let mut res = Vec::new();
+ let mut nodes = Vec::new();
for i in 0..num_messengers {
let logger = Arc::new(test_utils::TestLogger::with_id(format!("node {}", i)));
let seed = [i as u8; 32];
let keys_manager = Arc::new(test_utils::TestKeysInterface::new(&seed, Network::Testnet));
- res.push(MessengerNode {
+ nodes.push(MessengerNode {
keys_manager: keys_manager.clone(),
messenger: OnionMessenger::new(keys_manager, logger.clone()),
logger,
});
}
- res
+ for idx in 0..num_messengers - 1 {
+ let i = idx as usize;
+ let mut features = InitFeatures::known();
+ features.set_onion_messages_optional();
+ let init_msg = msgs::Init { features, remote_network_address: None };
+ nodes[i].messenger.peer_connected(&nodes[i + 1].get_node_pk(), &init_msg.clone());
+ nodes[i + 1].messenger.peer_connected(&nodes[i].get_node_pk(), &init_msg.clone());
+ }
+ nodes
}
-fn pass_along_path(mut path: Vec<MessengerNode>, expected_path_id: Option<[u8; 32]>) {
- let mut prev_node = path.remove(0);
+fn pass_along_path(path: &Vec<MessengerNode>, expected_path_id: Option<[u8; 32]>) {
+ let mut prev_node = &path[0];
let num_nodes = path.len();
- for (idx, node) in path.into_iter().enumerate() {
+ for (idx, node) in path.into_iter().skip(1).enumerate() {
let events = prev_node.messenger.release_pending_msgs();
- assert_eq!(events.len(), 1);
let onion_msg = {
let msgs = events.get(&node.get_node_pk()).unwrap();
assert_eq!(msgs.len(), 1);
fn one_hop() {
let nodes = create_nodes(2);
- nodes[0].messenger.send_onion_message(&[], Destination::Node(nodes[1].get_node_pk())).unwrap();
- pass_along_path(nodes, None);
+ nodes[0].messenger.send_onion_message(&[], Destination::Node(nodes[1].get_node_pk()), None).unwrap();
+ pass_along_path(&nodes, None);
}
#[test]
fn two_unblinded_hops() {
let nodes = create_nodes(3);
- nodes[0].messenger.send_onion_message(&[nodes[1].get_node_pk()], Destination::Node(nodes[2].get_node_pk())).unwrap();
- pass_along_path(nodes, None);
+ nodes[0].messenger.send_onion_message(&[nodes[1].get_node_pk()], Destination::Node(nodes[2].get_node_pk()), None).unwrap();
+ pass_along_path(&nodes, None);
}
#[test]
let secp_ctx = Secp256k1::new();
let blinded_route = BlindedRoute::new::<EnforcingSigner, _, _>(&[nodes[3].get_node_pk(), nodes[4].get_node_pk()], &*nodes[4].keys_manager, &secp_ctx).unwrap();
- nodes[0].messenger.send_onion_message(&[nodes[1].get_node_pk(), nodes[2].get_node_pk()], Destination::BlindedRoute(blinded_route)).unwrap();
- pass_along_path(nodes, None);
+ nodes[0].messenger.send_onion_message(&[nodes[1].get_node_pk(), nodes[2].get_node_pk()], Destination::BlindedRoute(blinded_route), None).unwrap();
+ pass_along_path(&nodes, None);
}
#[test]
let secp_ctx = Secp256k1::new();
let blinded_route = BlindedRoute::new::<EnforcingSigner, _, _>(&[nodes[1].get_node_pk(), nodes[2].get_node_pk(), nodes[3].get_node_pk()], &*nodes[3].keys_manager, &secp_ctx).unwrap();
- nodes[0].messenger.send_onion_message(&[], Destination::BlindedRoute(blinded_route)).unwrap();
- pass_along_path(nodes, None);
+ nodes[0].messenger.send_onion_message(&[], Destination::BlindedRoute(blinded_route), None).unwrap();
+ pass_along_path(&nodes, None);
}
#[test]
fn too_big_packet_error() {
// Make sure we error as expected if a packet is too big to send.
- let nodes = create_nodes(1);
-
- let hop_secret = SecretKey::from_slice(&hex::decode("0101010101010101010101010101010101010101010101010101010101010101").unwrap()[..]).unwrap();
- let secp_ctx = Secp256k1::new();
- let hop_node_id = PublicKey::from_secret_key(&secp_ctx, &hop_secret);
+ let nodes = create_nodes(2);
+ let hop_node_id = nodes[1].get_node_pk();
let hops = [hop_node_id; 400];
- let err = nodes[0].messenger.send_onion_message(&hops, Destination::Node(hop_node_id)).unwrap_err();
+ let err = nodes[0].messenger.send_onion_message(&hops, Destination::Node(hop_node_id), None).unwrap_err();
assert_eq!(err, SendError::TooBigPacket);
}
#[test]
fn invalid_blinded_route_error() {
// Make sure we error as expected if a provided blinded route has 0 or 1 hops.
- let mut nodes = create_nodes(3);
- let (node1, node2, node3) = (nodes.remove(0), nodes.remove(0), nodes.remove(0));
+ let nodes = create_nodes(3);
// 0 hops
let secp_ctx = Secp256k1::new();
- let mut blinded_route = BlindedRoute::new::<EnforcingSigner, _, _>(&[node2.get_node_pk(), node3.get_node_pk()], &*node3.keys_manager, &secp_ctx).unwrap();
+ let mut blinded_route = BlindedRoute::new::<EnforcingSigner, _, _>(&[nodes[1].get_node_pk(), nodes[2].get_node_pk()], &*nodes[2].keys_manager, &secp_ctx).unwrap();
blinded_route.blinded_hops.clear();
- let err = node1.messenger.send_onion_message(&[], Destination::BlindedRoute(blinded_route)).unwrap_err();
+ let err = nodes[0].messenger.send_onion_message(&[], Destination::BlindedRoute(blinded_route), None).unwrap_err();
assert_eq!(err, SendError::TooFewBlindedHops);
// 1 hop
- let mut blinded_route = BlindedRoute::new::<EnforcingSigner, _, _>(&[node2.get_node_pk(), node3.get_node_pk()], &*node3.keys_manager, &secp_ctx).unwrap();
+ let mut blinded_route = BlindedRoute::new::<EnforcingSigner, _, _>(&[nodes[1].get_node_pk(), nodes[2].get_node_pk()], &*nodes[2].keys_manager, &secp_ctx).unwrap();
blinded_route.blinded_hops.remove(0);
assert_eq!(blinded_route.blinded_hops.len(), 1);
- let err = node1.messenger.send_onion_message(&[], Destination::BlindedRoute(blinded_route)).unwrap_err();
+ let err = nodes[0].messenger.send_onion_message(&[], Destination::BlindedRoute(blinded_route), None).unwrap_err();
assert_eq!(err, SendError::TooFewBlindedHops);
}
+
+#[test]
+fn reply_path() {
+ let nodes = create_nodes(4);
+ let secp_ctx = Secp256k1::new();
+
+ // Destination::Node
+ let reply_path = BlindedRoute::new::<EnforcingSigner, _, _>(&[nodes[2].get_node_pk(), nodes[1].get_node_pk(), nodes[0].get_node_pk()], &*nodes[0].keys_manager, &secp_ctx).unwrap();
+ nodes[0].messenger.send_onion_message(&[nodes[1].get_node_pk(), nodes[2].get_node_pk()], Destination::Node(nodes[3].get_node_pk()), Some(reply_path)).unwrap();
+ pass_along_path(&nodes, None);
+ // Make sure the last node successfully decoded the reply path.
+ nodes[3].logger.assert_log_contains(
+ "lightning::onion_message::messenger".to_string(),
+ format!("Received an onion message with path_id: None and reply_path").to_string(), 1);
+
+ // Destination::BlindedRoute
+ let blinded_route = BlindedRoute::new::<EnforcingSigner, _, _>(&[nodes[1].get_node_pk(), nodes[2].get_node_pk(), nodes[3].get_node_pk()], &*nodes[3].keys_manager, &secp_ctx).unwrap();
+ let reply_path = BlindedRoute::new::<EnforcingSigner, _, _>(&[nodes[2].get_node_pk(), nodes[1].get_node_pk(), nodes[0].get_node_pk()], &*nodes[0].keys_manager, &secp_ctx).unwrap();
+
+ nodes[0].messenger.send_onion_message(&[], Destination::BlindedRoute(blinded_route), Some(reply_path)).unwrap();
+ pass_along_path(&nodes, None);
+ nodes[3].logger.assert_log_contains(
+ "lightning::onion_message::messenger".to_string(),
+ format!("Received an onion message with path_id: None and reply_path").to_string(), 2);
+}
+
+#[test]
+fn peer_buffer_full() {
+ let nodes = create_nodes(2);
+ for _ in 0..188 { // Based on MAX_PER_PEER_BUFFER_SIZE in OnionMessenger
+ nodes[0].messenger.send_onion_message(&[], Destination::Node(nodes[1].get_node_pk()), None).unwrap();
+ }
+ let err = nodes[0].messenger.send_onion_message(&[], Destination::Node(nodes[1].get_node_pk()), None).unwrap_err();
+ assert_eq!(err, SendError::BufferFull);
+}
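Why 188: an empty onion message uses the small 1300-byte hop-data packet, so each serialized message is roughly 1.4 KB on the wire. About 188 of those fit under the 256 KiB per-peer cap in `OnionMessenger` ((1 << 10) * 256 = 262,144 bytes), and the next send is refused with `SendError::BufferFull`. The per-message size is an estimate; the binding constant is `MAX_PER_PEER_BUFFER_SIZE`.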
use bitcoin::hashes::{Hash, HashEngine};
use bitcoin::hashes::hmac::{Hmac, HmacEngine};
use bitcoin::hashes::sha256::Hash as Sha256;
-use bitcoin::secp256k1::{self, PublicKey, Secp256k1, SecretKey};
+use bitcoin::secp256k1::{self, PublicKey, Scalar, Secp256k1, SecretKey};
use chain::keysinterface::{InMemorySigner, KeysInterface, KeysManager, Recipient, Sign};
-use ln::msgs;
+use ln::msgs::{self, OnionMessageHandler};
use ln::onion_utils;
use super::blinded_route::{BlindedRoute, ForwardTlvs, ReceiveTlvs};
use super::packet::{BIG_PACKET_HOP_DATA_LEN, ForwardControlTlvs, Packet, Payload, ReceiveControlTlvs, SMALL_PACKET_HOP_DATA_LEN};
use super::utils;
+use util::events::OnionMessageProvider;
use util::logger::Logger;
+use util::ser::Writeable;
use core::ops::Deref;
use sync::{Arc, Mutex};
///
/// # Example
///
-// Needs to be `ignore` until the `onion_message` module is made public, otherwise this is a test
-// failure.
-/// ```ignore
+/// ```
/// # extern crate bitcoin;
/// # use bitcoin::hashes::_export::_core::time::Duration;
/// # use bitcoin::secp256k1::{PublicKey, Secp256k1, SecretKey};
///
/// // Send an empty onion message to a node id.
/// let intermediate_hops = [hop_node_id1, hop_node_id2];
-/// onion_messenger.send_onion_message(&intermediate_hops, Destination::Node(destination_node_id));
+/// let reply_path = None;
+/// onion_messenger.send_onion_message(&intermediate_hops, Destination::Node(destination_node_id), reply_path);
///
/// // Create a blinded route to yourself, for someone to send an onion message to.
/// # let your_node_id = hop_node_id1;
///
/// // Send an empty onion message to a blinded route.
/// # let intermediate_hops = [hop_node_id1, hop_node_id2];
-/// onion_messenger.send_onion_message(&intermediate_hops, Destination::BlindedRoute(blinded_route));
+/// let reply_path = None;
+/// onion_messenger.send_onion_message(&intermediate_hops, Destination::BlindedRoute(blinded_route), reply_path);
/// ```
///
/// [offers]: <https://github.com/lightning/bolts/pull/798>
{
keys_manager: K,
logger: L,
- pending_messages: Mutex<HashMap<PublicKey, Vec<msgs::OnionMessage>>>,
+ pending_messages: Mutex<HashMap<PublicKey, VecDeque<msgs::OnionMessage>>>,
secp_ctx: Secp256k1<secp256k1::All>,
// Coming soon:
// invoice_handler: InvoiceHandler,
/// The provided [`Destination`] was an invalid [`BlindedRoute`], due to having fewer than two
/// blinded hops.
TooFewBlindedHops,
+ /// Our next-hop peer was offline or does not support onion message forwarding.
+ InvalidFirstHop,
+ /// Our next-hop peer's buffer was full or our total outbound buffer was full.
+ BufferFull,
}
impl<Signer: Sign, K: Deref, L: Deref> OnionMessenger<Signer, K, L>
/// Send an empty onion message to `destination`, routing it through `intermediate_nodes`.
/// See [`OnionMessenger`] for example usage.
- pub fn send_onion_message(&self, intermediate_nodes: &[PublicKey], destination: Destination) -> Result<(), SendError> {
+ pub fn send_onion_message(&self, intermediate_nodes: &[PublicKey], destination: Destination, reply_path: Option<BlindedRoute>) -> Result<(), SendError> {
if let Destination::BlindedRoute(BlindedRoute { ref blinded_hops, .. }) = destination {
if blinded_hops.len() < 2 {
return Err(SendError::TooFewBlindedHops);
}
};
let (packet_payloads, packet_keys) = packet_payloads_and_keys(
- &self.secp_ctx, intermediate_nodes, destination, &blinding_secret)
+ &self.secp_ctx, intermediate_nodes, destination, reply_path, &blinding_secret)
.map_err(|e| SendError::Secp256k1(e))?;
let prng_seed = self.keys_manager.get_secure_random_bytes();
- let onion_packet = construct_onion_message_packet(
+ let onion_routing_packet = construct_onion_message_packet(
packet_payloads, packet_keys, prng_seed).map_err(|()| SendError::TooBigPacket)?;
let mut pending_per_peer_msgs = self.pending_messages.lock().unwrap();
- let pending_msgs = pending_per_peer_msgs.entry(introduction_node_id).or_insert(Vec::new());
- pending_msgs.push(
- msgs::OnionMessage {
- blinding_point,
- onion_routing_packet: onion_packet,
+ if outbound_buffer_full(&introduction_node_id, &pending_per_peer_msgs) { return Err(SendError::BufferFull) }
+ match pending_per_peer_msgs.entry(introduction_node_id) {
+ hash_map::Entry::Vacant(_) => Err(SendError::InvalidFirstHop),
+ hash_map::Entry::Occupied(mut e) => {
+ e.get_mut().push_back(msgs::OnionMessage { blinding_point, onion_routing_packet });
+ Ok(())
+ }
+ }
+ }
+
+ #[cfg(test)]
+ pub(super) fn release_pending_msgs(&self) -> HashMap<PublicKey, VecDeque<msgs::OnionMessage>> {
+ let mut pending_msgs = self.pending_messages.lock().unwrap();
+ let mut msgs = HashMap::new();
+ // We don't want to disconnect the peers by removing them entirely from the original map, so we
+ // swap the pending message buffers individually.
+ for (peer_node_id, pending_messages) in &mut *pending_msgs {
+ msgs.insert(*peer_node_id, core::mem::take(pending_messages));
+ }
+ msgs
+ }
+}
+
+fn outbound_buffer_full(peer_node_id: &PublicKey, buffer: &HashMap<PublicKey, VecDeque<msgs::OnionMessage>>) -> bool {
+ const MAX_TOTAL_BUFFER_SIZE: usize = (1 << 20) * 128; // 128 MiB across all peers
+ const MAX_PER_PEER_BUFFER_SIZE: usize = (1 << 10) * 256; // 256 KiB per peer
+ let mut total_buffered_bytes = 0;
+ let mut peer_buffered_bytes = 0;
+ for (pk, peer_buf) in buffer {
+ for om in peer_buf {
+ let om_len = om.serialized_length();
+ if pk == peer_node_id {
+ peer_buffered_bytes += om_len;
+ }
+ total_buffered_bytes += om_len;
+
+ if total_buffered_bytes >= MAX_TOTAL_BUFFER_SIZE ||
+ peer_buffered_bytes >= MAX_PER_PEER_BUFFER_SIZE
+ {
+ return true
}
- );
- Ok(())
+ }
}
+ false
+}
+impl<Signer: Sign, K: Deref, L: Deref> OnionMessageHandler for OnionMessenger<Signer, K, L>
+ where K::Target: KeysInterface<Signer = Signer>,
+ L::Target: Logger,
+{
/// Handle an incoming onion message. Currently, if a message was destined for us we will log, but
/// soon we'll delegate the onion message to a handler that can generate invoices or send
/// payments.
- pub fn handle_onion_message(&self, _peer_node_id: &PublicKey, msg: &msgs::OnionMessage) {
+ fn handle_onion_message(&self, _peer_node_id: &PublicKey, msg: &msgs::OnionMessage) {
let control_tlvs_ss = match self.keys_manager.ecdh(Recipient::Node, &msg.blinding_point, None) {
Ok(ss) => ss,
Err(e) => {
Hmac::from_engine(hmac).into_inner()
};
match self.keys_manager.ecdh(Recipient::Node, &msg.onion_routing_packet.public_key,
- Some(&blinding_factor))
+ Some(&Scalar::from_be_bytes(blinding_factor).unwrap()))
{
Ok(ss) => ss.secret_bytes(),
Err(()) => {
msg.onion_routing_packet.hmac, control_tlvs_ss)
{
Ok((Payload::Receive {
- control_tlvs: ReceiveControlTlvs::Unblinded(ReceiveTlvs { path_id })
+ control_tlvs: ReceiveControlTlvs::Unblinded(ReceiveTlvs { path_id }), reply_path,
}, None)) => {
- log_info!(self.logger, "Received an onion message with path_id: {:02x?}", path_id);
+ log_info!(self.logger,
+ "Received an onion message with path_id: {:02x?} and {}reply_path",
+ path_id, if reply_path.is_some() { "" } else { "no " });
},
Ok((Payload::Forward(ForwardControlTlvs::Unblinded(ForwardTlvs {
next_node_id, next_blinding_override
hop_data: new_packet_bytes,
hmac: next_hop_hmac,
};
-
- let mut pending_per_peer_msgs = self.pending_messages.lock().unwrap();
- let pending_msgs = pending_per_peer_msgs.entry(next_node_id).or_insert(Vec::new());
- pending_msgs.push(
- msgs::OnionMessage {
- blinding_point: match next_blinding_override {
- Some(blinding_point) => blinding_point,
- None => {
- let blinding_factor = {
- let mut sha = Sha256::engine();
- sha.input(&msg.blinding_point.serialize()[..]);
- sha.input(control_tlvs_ss.as_ref());
- Sha256::from_engine(sha).into_inner()
- };
- let mut next_blinding_point = msg.blinding_point;
- if let Err(e) = next_blinding_point.mul_assign(&self.secp_ctx, &blinding_factor[..]) {
+ let onion_message = msgs::OnionMessage {
+ blinding_point: match next_blinding_override {
+ Some(blinding_point) => blinding_point,
+ None => {
+ let blinding_factor = {
+ let mut sha = Sha256::engine();
+ sha.input(&msg.blinding_point.serialize()[..]);
+ sha.input(control_tlvs_ss.as_ref());
+ Sha256::from_engine(sha).into_inner()
+ };
+ let next_blinding_point = msg.blinding_point;
+ match next_blinding_point.mul_tweak(&self.secp_ctx, &Scalar::from_be_bytes(blinding_factor).unwrap()) {
+ Ok(bp) => bp,
+ Err(e) => {
log_trace!(self.logger, "Failed to compute next blinding point: {}", e);
return
}
- next_blinding_point
- },
+ }
},
- onion_routing_packet: outgoing_packet,
},
- );
+ onion_routing_packet: outgoing_packet,
+ };
+
+ let mut pending_per_peer_msgs = self.pending_messages.lock().unwrap();
+ if outbound_buffer_full(&next_node_id, &pending_per_peer_msgs) {
+ log_trace!(self.logger, "Dropping forwarded onion message to peer {:?}: outbound buffer full", next_node_id);
+ return
+ }
+
+ // In fuzzing, treat the next-hop peer as always connected so forwards aren't
+ // dropped at the Vacant arm below and the queueing path gets exercised.
+ #[cfg(fuzzing)]
+ pending_per_peer_msgs.entry(next_node_id).or_insert_with(VecDeque::new);
+
+ match pending_per_peer_msgs.entry(next_node_id) {
+ hash_map::Entry::Vacant(_) => {
+ log_trace!(self.logger, "Dropping forwarded onion message to disconnected peer {:?}", next_node_id);
+ return
+ },
+ hash_map::Entry::Occupied(mut e) => {
+ e.get_mut().push_back(onion_message);
+ log_trace!(self.logger, "Forwarding an onion message to peer {}", next_node_id);
+ }
+ };
},
Err(e) => {
log_trace!(self.logger, "Errored decoding onion message packet: {:?}", e);
};
}
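The forwarding arm above advances the blinding point per the route-blinding scheme: with B_i the incoming `blinding_point` and ss_i this node's ECDH output against it (`control_tlvs_ss`),

    blinding_factor_i = SHA256(B_i || ss_i)
    B_{i+1} = blinding_factor_i * B_i

unless the forward TLVs carry a `next_blinding_override`, which is used verbatim instead. The `Scalar::from_be_bytes(...).unwrap()` is safe in practice: a SHA256 output falls outside the curve order only with negligible probability.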
- #[cfg(test)]
- pub(super) fn release_pending_msgs(&self) -> HashMap<PublicKey, Vec<msgs::OnionMessage>> {
+ fn peer_connected(&self, their_node_id: &PublicKey, init: &msgs::Init) {
+ if init.features.supports_onion_messages() {
+ let mut peers = self.pending_messages.lock().unwrap();
+ peers.insert(their_node_id.clone(), VecDeque::new());
+ }
+ }
+
+ fn peer_disconnected(&self, their_node_id: &PublicKey, _no_connection_possible: bool) {
let mut pending_msgs = self.pending_messages.lock().unwrap();
- let mut msgs = HashMap::new();
- core::mem::swap(&mut *pending_msgs, &mut msgs);
- msgs
+ pending_msgs.remove(their_node_id);
+ }
+}
+
+impl<Signer: Sign, K: Deref, L: Deref> OnionMessageProvider for OnionMessenger<Signer, K, L>
+ where K::Target: KeysInterface<Signer = Signer>,
+ L::Target: Logger,
+{
+ fn next_onion_message_for_peer(&self, peer_node_id: PublicKey) -> Option<msgs::OnionMessage> {
+ let mut pending_msgs = self.pending_messages.lock().unwrap();
+ if let Some(msgs) = pending_msgs.get_mut(&peer_node_id) {
+ return msgs.pop_front()
+ }
+ None
}
}
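A sketch of how a transport layer might drain this queue through the `OnionMessageProvider` impl above; the free-standing helper and its `send_to_peer` callback are illustrative assumptions, not LDK API:

    fn flush_onion_messages<P: OnionMessageProvider>(
        provider: &P, peer_node_id: PublicKey, mut send_to_peer: impl FnMut(msgs::OnionMessage),
    ) {
        // Messages pop front-first, preserving per-peer enqueue order.
        while let Some(om) = provider.next_onion_message_for_peer(peer_node_id) {
            send_to_peer(om);
        }
    }

A real integration would more likely pull one message per write cycle instead of draining the queue, so a single peer cannot monopolize the writer.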
/// Construct onion packet payloads and keys for sending an onion message along the given
/// `unblinded_path` to the given `destination`.
fn packet_payloads_and_keys<T: secp256k1::Signing + secp256k1::Verification>(
- secp_ctx: &Secp256k1<T>, unblinded_path: &[PublicKey], destination: Destination, session_priv: &SecretKey
+ secp_ctx: &Secp256k1<T>, unblinded_path: &[PublicKey], destination: Destination,
+ mut reply_path: Option<BlindedRoute>, session_priv: &SecretKey
) -> Result<(Vec<(Payload, [u8; 32])>, Vec<onion_utils::OnionKeys>), secp256k1::Error> {
let num_hops = unblinded_path.len() + destination.num_hops();
let mut payloads = Vec::with_capacity(num_hops);
} else if let Some(encrypted_payload) = enc_payload_opt {
payloads.push((Payload::Receive {
control_tlvs: ReceiveControlTlvs::Blinded(encrypted_payload),
+ reply_path: reply_path.take(),
}, control_tlvs_ss));
}
if let Some(control_tlvs_ss) = prev_control_tlvs_ss {
payloads.push((Payload::Receive {
- control_tlvs: ReceiveControlTlvs::Unblinded(ReceiveTlvs { path_id: None, })
+ control_tlvs: ReceiveControlTlvs::Unblinded(ReceiveTlvs { path_id: None, }),
+ reply_path: reply_path.take(),
}, control_tlvs_ss));
}
use ln::msgs::DecodeError;
use ln::onion_utils;
-use super::blinded_route::{ForwardTlvs, ReceiveTlvs};
+use super::blinded_route::{BlindedRoute, ForwardTlvs, ReceiveTlvs};
use util::chacha20poly1305rfc::{ChaChaPolyReadAdapter, ChaChaPolyWriteAdapter};
-use util::ser::{FixedLengthReader, LengthRead, LengthReadable, LengthReadableArgs, Readable, ReadableArgs, Writeable, Writer};
+use util::ser::{BigSize, FixedLengthReader, LengthRead, LengthReadable, LengthReadableArgs, Readable, ReadableArgs, Writeable, Writer};
use core::cmp;
use io::{self, Read};
let public_key = Readable::read(r)?;
let mut hop_data = Vec::new();
- let hop_data_len = r.total_bytes() as usize - 66; // 1 (version) + 33 (pubkey) + 32 (HMAC) = 66
+ let hop_data_len = r.total_bytes().saturating_sub(66) as usize; // 1 (version) + 33 (pubkey) + 32 (HMAC) = 66
let mut read_idx = 0;
while read_idx < hop_data_len {
let mut read_buffer = [0; READ_BUFFER_SIZE];
let read_amt = cmp::min(hop_data_len - read_idx, READ_BUFFER_SIZE);
- r.read_exact(&mut read_buffer[..read_amt]);
+ r.read_exact(&mut read_buffer[..read_amt])?;
hop_data.extend_from_slice(&read_buffer[..read_amt]);
read_idx += read_amt;
}
/// This payload is for the final hop.
Receive {
control_tlvs: ReceiveControlTlvs,
+ reply_path: Option<BlindedRoute>,
// Coming soon:
- // reply_path: Option<BlindedRoute>,
// message: Message,
}
}
impl Writeable for (Payload, [u8; 32]) {
fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
match &self.0 {
- Payload::Forward(ForwardControlTlvs::Blinded(encrypted_bytes)) |
- Payload::Receive { control_tlvs: ReceiveControlTlvs::Blinded(encrypted_bytes)} => {
+ Payload::Forward(ForwardControlTlvs::Blinded(encrypted_bytes)) => {
encode_varint_length_prefixed_tlv!(w, {
(4, encrypted_bytes, vec_type)
})
},
+ Payload::Receive {
+ control_tlvs: ReceiveControlTlvs::Blinded(encrypted_bytes), reply_path
+ } => {
+ encode_varint_length_prefixed_tlv!(w, {
+ (2, reply_path, option),
+ (4, encrypted_bytes, vec_type)
+ })
+ },
Payload::Forward(ForwardControlTlvs::Unblinded(control_tlvs)) => {
let write_adapter = ChaChaPolyWriteAdapter::new(self.1, &control_tlvs);
encode_varint_length_prefixed_tlv!(w, {
(4, write_adapter, required)
})
},
- Payload::Receive { control_tlvs: ReceiveControlTlvs::Unblinded(control_tlvs)} => {
+ Payload::Receive {
+ control_tlvs: ReceiveControlTlvs::Unblinded(control_tlvs), reply_path,
+ } => {
let write_adapter = ChaChaPolyWriteAdapter::new(self.1, &control_tlvs);
encode_varint_length_prefixed_tlv!(w, {
+ (2, reply_path, option),
(4, write_adapter, required)
})
},
// Uses the provided secret to simultaneously decode and decrypt the control TLVs.
impl ReadableArgs<SharedSecret> for Payload {
- fn read<R: Read>(mut r: &mut R, encrypted_tlvs_ss: SharedSecret) -> Result<Self, DecodeError> {
- use bitcoin::consensus::encode::{Decodable, Error, VarInt};
- let v: VarInt = Decodable::consensus_decode(&mut r)
- .map_err(|e| match e {
- Error::Io(ioe) => DecodeError::from(ioe),
- _ => DecodeError::InvalidValue
- })?;
-
+ fn read<R: Read>(r: &mut R, encrypted_tlvs_ss: SharedSecret) -> Result<Self, DecodeError> {
+ let v: BigSize = Readable::read(r)?;
let mut rd = FixedLengthReader::new(r, v.0);
- // TODO: support reply paths
- let mut _reply_path_bytes: Option<Vec<u8>> = Some(Vec::new());
+ let mut reply_path: Option<BlindedRoute> = None;
let mut read_adapter: Option<ChaChaPolyReadAdapter<ControlTlvs>> = None;
let rho = onion_utils::gen_rho_from_shared_secret(&encrypted_tlvs_ss.secret_bytes());
decode_tlv_stream!(&mut rd, {
- (2, _reply_path_bytes, vec_type),
+ (2, reply_path, option),
(4, read_adapter, (option: LengthReadableArgs, rho))
});
rd.eat_remaining().map_err(|_| DecodeError::ShortRead)?;
Ok(Payload::Forward(ForwardControlTlvs::Unblinded(tlvs)))
},
Some(ChaChaPolyReadAdapter { readable: ControlTlvs::Receive(tlvs)}) => {
- Ok(Payload::Receive { control_tlvs: ReceiveControlTlvs::Unblinded(tlvs)})
+ Ok(Payload::Receive { control_tlvs: ReceiveControlTlvs::Unblinded(tlvs), reply_path })
},
}
}
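As written and parsed above, an onion message payload is a single length-prefixed TLV stream:

    bigsize     total length of the TLV stream that follows
    tlv type 2  reply_path: an optional BlindedRoute the recipient may answer over
    tlv type 4  control TLVs, ChaCha20-Poly1305-encrypted under the rho key
                derived from the per-hop shared secret

Swapping bitcoin's consensus `VarInt` for lightning's `BigSize` in the reader also fixes the length prefix for large payloads: the two encodings share prefix bytes, but BigSize stores multi-byte values big-endian, unlike bitcoin's little-endian CompactSize.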
use bitcoin::hashes::{Hash, HashEngine};
use bitcoin::hashes::hmac::{Hmac, HmacEngine};
use bitcoin::hashes::sha256::Hash as Sha256;
-use bitcoin::secp256k1::{self, PublicKey, Secp256k1, SecretKey};
+use bitcoin::secp256k1::{self, PublicKey, Secp256k1, SecretKey, Scalar};
use bitcoin::secp256k1::ecdh::SharedSecret;
use ln::onion_utils;
hmac.input(encrypted_data_ss.as_ref());
Hmac::from_engine(hmac).into_inner()
};
- let mut unblinded_pk = $pk;
- unblinded_pk.mul_assign(secp_ctx, &hop_pk_blinding_factor)?;
- unblinded_pk
+ $pk.mul_tweak(secp_ctx, &Scalar::from_be_bytes(hop_pk_blinding_factor).unwrap())?
};
let onion_packet_ss = SharedSecret::new(&blinded_hop_pk, &onion_packet_pubkey_priv);
Sha256::from_engine(sha).into_inner()
};
- msg_blinding_point_priv.mul_assign(&msg_blinding_point_blinding_factor)?;
+ msg_blinding_point_priv = msg_blinding_point_priv.mul_tweak(&Scalar::from_be_bytes(msg_blinding_point_blinding_factor).unwrap())?;
msg_blinding_point = PublicKey::from_secret_key(secp_ctx, &msg_blinding_point_priv);
let onion_packet_pubkey_blinding_factor = {
sha.input(onion_packet_ss.as_ref());
Sha256::from_engine(sha).into_inner()
};
- onion_packet_pubkey_priv.mul_assign(&onion_packet_pubkey_blinding_factor)?;
+ onion_packet_pubkey_priv = onion_packet_pubkey_priv.mul_tweak(&Scalar::from_be_bytes(onion_packet_pubkey_blinding_factor).unwrap())?;
onion_packet_pubkey = PublicKey::from_secret_key(secp_ctx, &onion_packet_pubkey_priv);
};
}
use bitcoin::hashes::sha256d::Hash as Sha256dHash;
use bitcoin::hashes::Hash;
-use bitcoin::blockdata::script::Builder;
use bitcoin::blockdata::transaction::TxOut;
-use bitcoin::blockdata::opcodes;
use bitcoin::hash_types::BlockHash;
use chain;
use chain::Access;
+use ln::chan_utils::make_funding_redeemscript;
use ln::features::{ChannelFeatures, NodeFeatures};
use ln::msgs::{DecodeError, ErrorAction, Init, LightningError, RoutingMessageHandler, NetAddress, MAX_VALUE_MSAT};
use ln::msgs::{ChannelAnnouncement, ChannelUpdate, NodeAnnouncement, GossipTimestampFilter};
use sync::{RwLock, RwLockReadGuard};
use core::sync::atomic::{AtomicUsize, Ordering};
use sync::Mutex;
-use core::ops::Deref;
+use core::ops::{Bound, Deref};
use bitcoin::hashes::hex::ToHex;
#[cfg(feature = "std")]
Ok(msg.contents.excess_data.len() <= MAX_EXCESS_BYTES_FOR_RELAY)
}
- fn get_next_channel_announcements(&self, starting_point: u64, batch_amount: u8) -> Vec<(ChannelAnnouncement, Option<ChannelUpdate>, Option<ChannelUpdate>)> {
- let mut result = Vec::with_capacity(batch_amount as usize);
+ fn get_next_channel_announcement(&self, starting_point: u64) -> Option<(ChannelAnnouncement, Option<ChannelUpdate>, Option<ChannelUpdate>)> {
let channels = self.network_graph.channels.read().unwrap();
- let mut iter = channels.range(starting_point..);
- while result.len() < batch_amount as usize {
- if let Some((_, ref chan)) = iter.next() {
- if chan.announcement_message.is_some() {
- let chan_announcement = chan.announcement_message.clone().unwrap();
- let mut one_to_two_announcement: Option<msgs::ChannelUpdate> = None;
- let mut two_to_one_announcement: Option<msgs::ChannelUpdate> = None;
- if let Some(one_to_two) = chan.one_to_two.as_ref() {
- one_to_two_announcement = one_to_two.last_update_message.clone();
- }
- if let Some(two_to_one) = chan.two_to_one.as_ref() {
- two_to_one_announcement = two_to_one.last_update_message.clone();
- }
- result.push((chan_announcement, one_to_two_announcement, two_to_one_announcement));
- } else {
- // TODO: We may end up sending un-announced channel_updates if we are sending
- // initial sync data while receiving announce/updates for this channel.
+ for (_, ref chan) in channels.range(starting_point..) {
+ if chan.announcement_message.is_some() {
+ let chan_announcement = chan.announcement_message.clone().unwrap();
+ let mut one_to_two_announcement: Option<msgs::ChannelUpdate> = None;
+ let mut two_to_one_announcement: Option<msgs::ChannelUpdate> = None;
+ if let Some(one_to_two) = chan.one_to_two.as_ref() {
+ one_to_two_announcement = one_to_two.last_update_message.clone();
}
+ if let Some(two_to_one) = chan.two_to_one.as_ref() {
+ two_to_one_announcement = two_to_one.last_update_message.clone();
+ }
+ return Some((chan_announcement, one_to_two_announcement, two_to_one_announcement));
} else {
- return result;
+ // TODO: We may end up sending un-announced channel_updates if we are sending
+ // initial sync data while receiving announce/updates for this channel.
}
}
- result
+ None
}
- fn get_next_node_announcements(&self, starting_point: Option<&PublicKey>, batch_amount: u8) -> Vec<NodeAnnouncement> {
- let mut result = Vec::with_capacity(batch_amount as usize);
+ fn get_next_node_announcement(&self, starting_point: Option<&PublicKey>) -> Option<NodeAnnouncement> {
let nodes = self.network_graph.nodes.read().unwrap();
- let mut iter = if let Some(pubkey) = starting_point {
- let mut iter = nodes.range(NodeId::from_pubkey(pubkey)..);
- iter.next();
- iter
+ let iter = if let Some(pubkey) = starting_point {
+ nodes.range((Bound::Excluded(NodeId::from_pubkey(pubkey)), Bound::Unbounded))
} else {
- nodes.range::<NodeId, _>(..)
+ nodes.range(..)
};
- while result.len() < batch_amount as usize {
- if let Some((_, ref node)) = iter.next() {
- if let Some(node_info) = node.announcement_info.as_ref() {
- if node_info.announcement_message.is_some() {
- result.push(node_info.announcement_message.clone().unwrap());
- }
+ for (_, ref node) in iter {
+ if let Some(node_info) = node.announcement_info.as_ref() {
+ if let Some(msg) = node_info.announcement_message.clone() {
+ return Some(msg);
}
- } else {
- return result;
}
}
- result
+ None
}
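The `Bound::Excluded` range fixes a subtle skip bug: the old `iter.next()` unconditionally discarded the first entry of the range, so if `starting_point` was not itself a key in the map, an unrelated node got skipped. A minimal illustration of the difference:

    use std::collections::BTreeMap;
    use std::ops::Bound;

    let mut m = BTreeMap::new();
    m.insert(5u8, "a");
    m.insert(7u8, "b");
    // Old approach: skip the first entry of the range unconditionally.
    // Starting from 6 (absent from the map), this silently loses key 7:
    assert_eq!(m.range(6..).skip(1).next(), None);
    // Excluded lower bound: only an exact match is excluded.
    assert_eq!(m.range((Bound::Excluded(6u8), Bound::Unbounded)).next(), Some((&7u8, &"b")));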
/// Initiates a stateless sync of routing gossip information with a peer
///
/// While this may be smaller than the actual channel capacity, amounts greater than
/// [`Self::as_msat`] should not be routed through the channel.
-#[derive(Clone, Copy)]
+#[derive(Clone, Copy, Debug)]
pub enum EffectiveCapacity {
/// The available liquidity in the channel known from being a channel counterparty, and thus a
/// direct hop.
return Err(LightningError{err: "Channel announcement node had a channel with itself".to_owned(), action: ErrorAction::IgnoreError});
}
+ {
+ let channels = self.channels.read().unwrap();
+
+ if let Some(chan) = channels.get(&msg.short_channel_id) {
+ if chan.capacity_sats.is_some() {
+ // If we'd previously looked up the channel on-chain and checked the script
+ // against what appears on-chain, ignore the duplicate announcement.
+ //
+ // Because a reorg could replace one channel with another at the same SCID, if
+ // the channel appears to be different, we re-validate. This doesn't expose us
+ // to any more DoS risk than we already face, as a peer can always flood us with
+ // randomly-generated SCID values anyway.
+ //
+ // We use the Node IDs rather than the bitcoin_keys to check for "equivalence"
+ // as we didn't (necessarily) store the bitcoin keys, and we only really care
+ // if the peers on the channel changed anyway.
+ if NodeId::from_pubkey(&msg.node_id_1) == chan.node_one && NodeId::from_pubkey(&msg.node_id_2) == chan.node_two {
+ return Err(LightningError {
+ err: "Already have chain-validated channel".to_owned(),
+ action: ErrorAction::IgnoreDuplicateGossip
+ });
+ }
+ } else if chain_access.is_none() {
+ // Similarly, if we can't check the chain right now anyway, ignore the
+ // duplicate announcement without bothering to take the channels write lock.
+ return Err(LightningError {
+ err: "Already have non-chain-validated channel".to_owned(),
+ action: ErrorAction::IgnoreDuplicateGossip
+ });
+ }
+ }
+ }
+
let utxo_value = match &chain_access {
&None => {
// Tentatively accept, potentially exposing us to DoS attacks
&Some(ref chain_access) => {
match chain_access.get_utxo(&msg.chain_hash, msg.short_channel_id) {
Ok(TxOut { value, script_pubkey }) => {
- let expected_script = Builder::new().push_opcode(opcodes::all::OP_PUSHNUM_2)
- .push_slice(&msg.bitcoin_key_1.serialize())
- .push_slice(&msg.bitcoin_key_2.serialize())
- .push_opcode(opcodes::all::OP_PUSHNUM_2)
- .push_opcode(opcodes::all::OP_CHECKMULTISIG).into_script().to_v0_p2wsh();
+ let expected_script =
+ make_funding_redeemscript(&msg.bitcoin_key_1, &msg.bitcoin_key_2).to_v0_p2wsh();
if script_pubkey != expected_script {
- return Err(LightningError{err: format!("Channel announcement key ({}) didn't match on-chain script ({})", script_pubkey.to_hex(), expected_script.to_hex()), action: ErrorAction::IgnoreError});
+ return Err(LightningError{err: format!("Channel announcement key ({}) didn't match on-chain script ({})", expected_script.to_hex(), script_pubkey.to_hex()), action: ErrorAction::IgnoreError});
}
//TODO: Check if value is worth storing, use it to inform routing, and compare it
//to the new HTLC max field in channel_update
#[cfg(test)]
mod tests {
use chain;
+ use ln::chan_utils::make_funding_redeemscript;
use ln::PaymentHash;
use ln::features::{ChannelFeatures, InitFeatures, NodeFeatures};
use routing::gossip::{P2PGossipSync, NetworkGraph, NetworkUpdate, NodeAlias, MAX_EXCESS_BYTES_FOR_RELAY, NodeId, RoutingFees, ChannelUpdateInfo, ChannelInfo, NodeAnnouncementInfo, NodeInfo};
use bitcoin::hashes::Hash;
use bitcoin::network::constants::Network;
use bitcoin::blockdata::constants::genesis_block;
- use bitcoin::blockdata::script::{Builder, Script};
+ use bitcoin::blockdata::script::Script;
use bitcoin::blockdata::transaction::TxOut;
- use bitcoin::blockdata::opcodes;
use hex;
}
fn get_channel_script(secp_ctx: &Secp256k1<secp256k1::All>) -> Script {
- let node_1_btckey = &SecretKey::from_slice(&[40; 32]).unwrap();
- let node_2_btckey = &SecretKey::from_slice(&[39; 32]).unwrap();
- Builder::new().push_opcode(opcodes::all::OP_PUSHNUM_2)
- .push_slice(&PublicKey::from_secret_key(&secp_ctx, node_1_btckey).serialize())
- .push_slice(&PublicKey::from_secret_key(&secp_ctx, node_2_btckey).serialize())
- .push_opcode(opcodes::all::OP_PUSHNUM_2)
- .push_opcode(opcodes::all::OP_CHECKMULTISIG).into_script()
- .to_v0_p2wsh()
+ let node_1_btckey = SecretKey::from_slice(&[40; 32]).unwrap();
+ let node_2_btckey = SecretKey::from_slice(&[39; 32]).unwrap();
+ make_funding_redeemscript(&PublicKey::from_secret_key(secp_ctx, &node_1_btckey),
+ &PublicKey::from_secret_key(secp_ctx, &node_2_btckey)).to_v0_p2wsh()
}
fn get_signed_channel_update<F: Fn(&mut UnsignedChannelUpdate)>(f: F, node_key: &SecretKey, secp_ctx: &Secp256k1<secp256k1::All>) -> ChannelUpdate {
// drop new one on the floor, since we can't see any changes.
match gossip_sync.handle_channel_announcement(&valid_announcement) {
Ok(_) => panic!(),
- Err(e) => assert_eq!(e.err, "Already have knowledge of channel")
+ Err(e) => assert_eq!(e.err, "Already have non-chain-validated channel")
};
// Test the case where the associated transaction is not on-chain (or not confirmed).
};
}
- // If we receive announcement for the same channel (but TX is not confirmed),
- // drop new one on the floor, since we can't see any changes.
- *chain_source.utxo_ret.lock().unwrap() = Err(chain::AccessError::UnknownTx);
- match gossip_sync.handle_channel_announcement(&valid_announcement) {
- Ok(_) => panic!(),
- Err(e) => assert_eq!(e.err, "Channel announced without corresponding UTXO entry")
- };
-
- // But if it is confirmed, replace the channel
+ // If we receive an announcement for the same channel, once we've validated it against the
+ // chain, we simply ignore all new (duplicate) announcements.
*chain_source.utxo_ret.lock().unwrap() = Ok(TxOut { value: 0, script_pubkey: good_script });
- let valid_announcement = get_signed_channel_announcement(|unsigned_announcement| {
- unsigned_announcement.features = ChannelFeatures::empty();
- unsigned_announcement.short_channel_id += 2;
- }, node_1_privkey, node_2_privkey, &secp_ctx);
match gossip_sync.handle_channel_announcement(&valid_announcement) {
- Ok(res) => assert!(res),
- _ => panic!()
+ Ok(_) => panic!(),
+ Err(e) => assert_eq!(e.err, "Already have chain-validated channel")
};
- {
- match network_graph.read_only().channels().get(&valid_announcement.contents.short_channel_id) {
- Some(channel_entry) => {
- assert_eq!(channel_entry.features, ChannelFeatures::empty());
- },
- _ => panic!()
- };
- }
// Don't relay valid channels with excess data
let valid_announcement = get_signed_channel_announcement(|unsigned_announcement| {
let node_2_privkey = &SecretKey::from_slice(&[41; 32]).unwrap();
// Channels were not announced yet.
- let channels_with_announcements = gossip_sync.get_next_channel_announcements(0, 1);
- assert_eq!(channels_with_announcements.len(), 0);
+ let channels_with_announcements = gossip_sync.get_next_channel_announcement(0);
+ assert!(channels_with_announcements.is_none());
let short_channel_id;
{
}
// Contains initial channel announcement now.
- let channels_with_announcements = gossip_sync.get_next_channel_announcements(short_channel_id, 1);
- assert_eq!(channels_with_announcements.len(), 1);
- if let Some(channel_announcements) = channels_with_announcements.first() {
- let &(_, ref update_1, ref update_2) = channel_announcements;
+ let channels_with_announcements = gossip_sync.get_next_channel_announcement(short_channel_id);
+ if let Some(channel_announcements) = channels_with_announcements {
+ let (_, ref update_1, ref update_2) = channel_announcements;
assert_eq!(update_1, &None);
assert_eq!(update_2, &None);
} else {
panic!();
}
-
{
// Valid channel update
let valid_channel_update = get_signed_channel_update(|unsigned_channel_update| {
}
// Now contains an initial announcement and an update.
- let channels_with_announcements = gossip_sync.get_next_channel_announcements(short_channel_id, 1);
- assert_eq!(channels_with_announcements.len(), 1);
- if let Some(channel_announcements) = channels_with_announcements.first() {
- let &(_, ref update_1, ref update_2) = channel_announcements;
+ let channels_with_announcements = gossip_sync.get_next_channel_announcement(short_channel_id);
+ if let Some(channel_announcements) = channels_with_announcements {
+ let (_, ref update_1, ref update_2) = channel_announcements;
assert_ne!(update_1, &None);
assert_eq!(update_2, &None);
} else {
}
// Test that announcements with excess data won't be returned
- let channels_with_announcements = gossip_sync.get_next_channel_announcements(short_channel_id, 1);
- assert_eq!(channels_with_announcements.len(), 1);
- if let Some(channel_announcements) = channels_with_announcements.first() {
- let &(_, ref update_1, ref update_2) = channel_announcements;
+ let channels_with_announcements = gossip_sync.get_next_channel_announcement(short_channel_id);
+ if let Some(channel_announcements) = channels_with_announcements {
+ let (_, ref update_1, ref update_2) = channel_announcements;
assert_eq!(update_1, &None);
assert_eq!(update_2, &None);
} else {
}
// A further starting point has no channels after it
- let channels_with_announcements = gossip_sync.get_next_channel_announcements(short_channel_id + 1000, 1);
- assert_eq!(channels_with_announcements.len(), 0);
+ let channels_with_announcements = gossip_sync.get_next_channel_announcement(short_channel_id + 1000);
+ assert!(channels_with_announcements.is_none());
}
#[test]
let node_id_1 = PublicKey::from_secret_key(&secp_ctx, node_1_privkey);
// No nodes yet.
- let next_announcements = gossip_sync.get_next_node_announcements(None, 10);
- assert_eq!(next_announcements.len(), 0);
+ let next_announcements = gossip_sync.get_next_node_announcement(None);
+ assert!(next_announcements.is_none());
{
// Announce a channel to add 2 nodes
};
}
-
// Nodes were never announced
- let next_announcements = gossip_sync.get_next_node_announcements(None, 3);
- assert_eq!(next_announcements.len(), 0);
+ let next_announcements = gossip_sync.get_next_node_announcement(None);
+ assert!(next_announcements.is_none());
{
let valid_announcement = get_signed_node_announcement(|_| {}, node_1_privkey, &secp_ctx);
};
}
- let next_announcements = gossip_sync.get_next_node_announcements(None, 3);
- assert_eq!(next_announcements.len(), 2);
+ let next_announcements = gossip_sync.get_next_node_announcement(None);
+ assert!(next_announcements.is_some());
// Skip the first node.
- let next_announcements = gossip_sync.get_next_node_announcements(Some(&node_id_1), 2);
- assert_eq!(next_announcements.len(), 1);
+ let next_announcements = gossip_sync.get_next_node_announcement(Some(&node_id_1));
+ assert!(next_announcements.is_some());
{
// A later announcement which should not be relayed (excess data) prevents us from sharing a node
};
}
- let next_announcements = gossip_sync.get_next_node_announcements(Some(&node_id_1), 2);
- assert_eq!(next_announcements.len(), 0);
+ let next_announcements = gossip_sync.get_next_node_announcement(Some(&node_id_1));
+ assert!(next_announcements.is_none());
}
#[test]
let legacy_chan_update_info_with_none: Vec<u8> = hex::decode("2c0004000000170201010402002a060800000000000004d20801000a0d0c00040000000902040000000a0c0100").unwrap();
let read_chan_update_info_res: Result<ChannelUpdateInfo, ::ln::msgs::DecodeError> = ::util::ser::Readable::read(&mut legacy_chan_update_info_with_none.as_slice());
assert!(read_chan_update_info_res.is_err());
-
+
// 2. Test encoding/decoding of ChannelInfo
// Check we can encode/decode ChannelInfo without ChannelUpdateInfo fields present.
let chan_info_none_updates = ChannelInfo {
}
/// Proposed use of a channel passed as a parameter to [`Score::channel_penalty_msat`].
-#[derive(Clone, Copy)]
+#[derive(Clone, Copy, Debug)]
pub struct ChannelUsage {
/// The amount to send through the channel, denominated in millisatoshis.
pub amount_msat: u64,
pub(super) fn encrypt_in_place(&mut self, _input_output: &mut [u8]) {
assert!(self.finished == false);
- self.finished = true;
}
pub(super) fn finish_and_get_tag(&mut self, out_tag: &mut [u8]) {
+ assert!(self.finished == false);
out_tag.copy_from_slice(&self.tag);
self.finished = true;
}
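Moving the `finished` assertion out of `encrypt_in_place` lets a caller feed plaintext in multiple chunks before finalizing, e.g. (hypothetical usage; `enc` is an instance of this cipher):

    // enc.encrypt_in_place(&mut chunk_a);
    // enc.encrypt_in_place(&mut chunk_b);
    // let mut tag = [0u8; 16];
    // enc.finish_and_get_tag(&mut tag); // the one-shot latch now lives here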
use util::ser::{BigSize, FixedLengthReader, Writeable, Writer, MaybeReadable, Readable, VecReadWrapper, VecWriteWrapper};
use routing::router::{RouteHop, RouteParameters};
-use bitcoin::Transaction;
+use bitcoin::{PackedLockTime, Transaction};
use bitcoin::blockdata::script::Script;
use bitcoin::hashes::Hash;
use bitcoin::hashes::sha256::Hash as Sha256;
11u8 => {
let f = || {
let mut channel_id = [0; 32];
- let mut transaction = Transaction{ version: 2, lock_time: 0, input: Vec::new(), output: Vec::new() };
+ let mut transaction = Transaction{ version: 2, lock_time: PackedLockTime::ZERO, input: Vec::new(), output: Vec::new() };
read_tlv_fields!(reader, {
(0, channel_id, required),
(2, transaction, required),
fn get_and_clear_pending_msg_events(&self) -> Vec<MessageSendEvent>;
}
+/// A trait indicating an object may generate onion messages to send.
+pub trait OnionMessageProvider {
+ /// Gets the next pending onion message for the peer with the given node id.
+ fn next_onion_message_for_peer(&self, peer_node_id: PublicKey) -> Option<msgs::OnionMessage>;
+}
+
/// A trait indicating an object may generate events.
///
/// Events are processed by passing an [`EventHandler`] to [`process_pending_events`].
fn fmt(&self, f: &mut core::fmt::Formatter) -> Result<(), core::fmt::Error> {
if self.0.input.len() >= 1 && self.0.input.iter().any(|i| !i.witness.is_empty()) {
if self.0.input.len() == 1 && self.0.input[0].witness.last().unwrap().len() == 71 &&
- (self.0.input[0].sequence >> 8*3) as u8 == 0x80 {
+ (self.0.input[0].sequence.0 >> 8*3) as u8 == 0x80 {
write!(f, "commitment tx ")?;
} else if self.0.input.len() == 1 && self.0.input[0].witness.last().unwrap().len() == 71 {
write!(f, "closing tx ")?;
}
/// Logs an entry at the given level.
+#[doc(hidden)]
#[macro_export]
macro_rules! log_given_level {
($logger: expr, $lvl:expr, $($arg:tt)+) => (
);
}
-/// Log an error.
+/// Log at the `ERROR` level.
#[macro_export]
macro_rules! log_error {
($logger: expr, $($arg:tt)*) => (
)
}
+/// Log at the `WARN` level.
+#[macro_export]
macro_rules! log_warn {
($logger: expr, $($arg:tt)*) => (
log_given_level!($logger, $crate::util::logger::Level::Warn, $($arg)*);
)
}
+/// Log at the `INFO` level.
+#[macro_export]
macro_rules! log_info {
($logger: expr, $($arg:tt)*) => (
log_given_level!($logger, $crate::util::logger::Level::Info, $($arg)*);
)
}
+/// Log at the `DEBUG` level.
+#[macro_export]
macro_rules! log_debug {
($logger: expr, $($arg:tt)*) => (
log_given_level!($logger, $crate::util::logger::Level::Debug, $($arg)*);
)
}
-/// Log a trace log.
+/// Log at the `TRACE` level.
#[macro_export]
macro_rules! log_trace {
($logger: expr, $($arg:tt)*) => (
)
}
-/// Log a gossip log.
+/// Log at the `GOSSIP` level.
+#[macro_export]
macro_rules! log_gossip {
($logger: expr, $($arg:tt)*) => (
log_given_level!($logger, $crate::util::logger::Level::Gossip, $($arg)*);
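// Usage sketch (`logger`, `err_msg`, and `scid` assumed in scope): every level
// macro expands through log_given_level!, which is why that macro must itself
// be #[macro_export]ed for downstream crates while staying #[doc(hidden)] as an
// implementation detail:
log_error!(logger, "disconnecting peer: {}", err_msg);
log_gossip!(logger, "ignoring stale channel_update for scid {}", scid);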
/// variable-length integer which is simply truncated by skipping high zero bytes. This type
/// encapsulates such integers implementing Readable/Writeable for them.
#[cfg_attr(test, derive(PartialEq, Debug))]
-pub(crate) struct HighZeroBytesDroppedVarInt<T>(pub T);
+pub(crate) struct HighZeroBytesDroppedBigSize<T>(pub T);
macro_rules! impl_writeable_primitive {
($val_type:ty, $len: expr) => {
writer.write_all(&self.to_be_bytes())
}
}
- impl Writeable for HighZeroBytesDroppedVarInt<$val_type> {
+ impl Writeable for HighZeroBytesDroppedBigSize<$val_type> {
#[inline]
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
// Skip any full leading 0 bytes when writing (in BE):
Ok(<$val_type>::from_be_bytes(buf))
}
}
- impl Readable for HighZeroBytesDroppedVarInt<$val_type> {
+ impl Readable for HighZeroBytesDroppedBigSize<$val_type> {
#[inline]
- fn read<R: Read>(reader: &mut R) -> Result<HighZeroBytesDroppedVarInt<$val_type>, DecodeError> {
+ fn read<R: Read>(reader: &mut R) -> Result<HighZeroBytesDroppedBigSize<$val_type>, DecodeError> {
// We need to accept short reads (read_len == 0) as "EOF" and handle them as simply
// the high bytes being dropped. To do so, we start reading into the middle of buf
// and then convert the appropriate number of bytes with extra high bytes out of
let first_byte = $len - ($len - total_read_len);
let mut bytes = [0; $len];
bytes.copy_from_slice(&buf[first_byte..first_byte + $len]);
- Ok(HighZeroBytesDroppedVarInt(<$val_type>::from_be_bytes(bytes)))
+ Ok(HighZeroBytesDroppedBigSize(<$val_type>::from_be_bytes(bytes)))
} else {
// If the encoding had extra zero bytes, return a failure even though we know
// what they meant (as the TLV test vectors require this)
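// Encoding sketch (the rename is purely cosmetic; behaviour is unchanged):
// leading zero bytes of the big-endian value are dropped on the wire, so
// 0x0100u64 serializes to just two bytes. VecWriter is the crate's test writer
// imported in the tests below:
let mut w = VecWriter(Vec::new());
HighZeroBytesDroppedBigSize(0x0100u64).write(&mut w).unwrap();
assert_eq!(w.0, vec![0x01, 0x00]);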
($bitcoin_type: ty) => {
impl Writeable for $bitcoin_type {
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
- match self.consensus_encode(WriterWriteAdaptor(writer)) {
+ match self.consensus_encode(&mut WriterWriteAdaptor(writer)) {
Ok(_) => Ok(()),
Err(e) => Err(e),
}
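// Context for the added `&mut`: bitcoin 0.29's Encodable::consensus_encode now
// takes the writer by mutable reference rather than by value, e.g. (with
// `bitcoin::consensus::Encodable` in scope and a `tx: Transaction` assumed):
let mut buf = Vec::new();
let encoded_len = tx.consensus_encode(&mut buf).unwrap();
assert_eq!(encoded_len, buf.len());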
use io::{self, Cursor};
use prelude::*;
use ln::msgs::DecodeError;
- use util::ser::{Writeable, HighZeroBytesDroppedVarInt, VecWriter};
+ use util::ser::{Writeable, HighZeroBytesDroppedBigSize, VecWriter};
use bitcoin::secp256k1::PublicKey;
// The BOLT TLV test cases don't include any tests which use our "required-value" logic since
}
// BOLT TLV test cases
- fn tlv_reader_n1(s: &[u8]) -> Result<(Option<HighZeroBytesDroppedVarInt<u64>>, Option<u64>, Option<(PublicKey, u64, u64)>, Option<u16>), DecodeError> {
+ fn tlv_reader_n1(s: &[u8]) -> Result<(Option<HighZeroBytesDroppedBigSize<u64>>, Option<u64>, Option<(PublicKey, u64, u64)>, Option<u16>), DecodeError> {
let mut s = Cursor::new(s);
- let mut tlv1: Option<HighZeroBytesDroppedVarInt<u64>> = None;
+ let mut tlv1: Option<HighZeroBytesDroppedBigSize<u64>> = None;
let mut tlv2: Option<u64> = None;
let mut tlv3: Option<(PublicKey, u64, u64)> = None;
let mut tlv4: Option<u16> = None;
assert_eq!(stream.0, ::hex::decode("06fd00ff02abcd").unwrap());
stream.0.clear();
- encode_varint_length_prefixed_tlv!(&mut stream, {(0, 1u64, required), (42, None::<u64>, option), (0xff, HighZeroBytesDroppedVarInt(0u64), required)});
+ encode_varint_length_prefixed_tlv!(&mut stream, {(0, 1u64, required), (42, None::<u64>, option), (0xff, HighZeroBytesDroppedBigSize(0u64), required)});
assert_eq!(stream.0, ::hex::decode("0e00080000000000000001fd00ff00").unwrap());
stream.0.clear();
- encode_varint_length_prefixed_tlv!(&mut stream, {(0, Some(1u64), option), (0xff, HighZeroBytesDroppedVarInt(0u64), required)});
+ encode_varint_length_prefixed_tlv!(&mut stream, {(0, Some(1u64), option), (0xff, HighZeroBytesDroppedBigSize(0u64), required)});
assert_eq!(stream.0, ::hex::decode("0e00080000000000000001fd00ff00").unwrap());
Ok(())
use bitcoin::network::constants::Network;
use bitcoin::hash_types::{BlockHash, Txid};
-use bitcoin::secp256k1::{SecretKey, PublicKey, Secp256k1, ecdsa::Signature};
+use bitcoin::secp256k1::{SecretKey, PublicKey, Secp256k1, ecdsa::Signature, Scalar};
use bitcoin::secp256k1::ecdh::SharedSecret;
use bitcoin::secp256k1::ecdsa::RecoverableSignature;
use core::time::Duration;
use sync::{Mutex, Arc};
use core::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
-use core::{cmp, mem};
+use core::mem;
use bitcoin::bech32::u5;
use chain::keysinterface::{InMemorySigner, Recipient, KeyMaterial};
#[cfg(feature = "std")]
use std::time::{SystemTime, UNIX_EPOCH};
+use bitcoin::Sequence;
pub struct TestVecWriter(pub Vec<u8>);
impl Writer for TestVecWriter {
type Signer = EnforcingSigner;
fn get_node_secret(&self, _recipient: Recipient) -> Result<SecretKey, ()> { unreachable!(); }
- fn ecdh(&self, _recipient: Recipient, _other_key: &PublicKey, _tweak: Option<&[u8; 32]>) -> Result<SharedSecret, ()> { unreachable!(); }
+ fn ecdh(&self, _recipient: Recipient, _other_key: &PublicKey, _tweak: Option<&Scalar>) -> Result<SharedSecret, ()> { unreachable!(); }
fn get_inbound_payment_key_material(&self) -> KeyMaterial { unreachable!(); }
fn get_destination_script(&self) -> Script { unreachable!(); }
fn get_shutdown_scriptpubkey(&self) -> ShutdownScript { unreachable!(); }
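// Caller-side sketch for the new signature: a raw 32-byte tweak must be lifted
// into a typed Scalar first, which is fallible since the bytes must encode a
// valid scalar (`keys`, `counterparty_key`, and `tweak_bytes: [u8; 32]` assumed):
let tweak = Scalar::from_be_bytes(tweak_bytes).expect("tweak out of range");
let shared = keys.ecdh(Recipient::Node, &counterparty_key, Some(&tweak));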
impl chaininterface::BroadcasterInterface for TestBroadcaster {
fn broadcast_transaction(&self, tx: &Transaction) {
- assert!(tx.lock_time < 1_500_000_000);
- if tx.lock_time > self.blocks.lock().unwrap().len() as u32 + 1 && tx.lock_time < 500_000_000 {
+ let lock_time = tx.lock_time.0;
+ assert!(lock_time < 1_500_000_000);
+ if lock_time > self.blocks.lock().unwrap().len() as u32 + 1 && lock_time < 500_000_000 {
for inp in tx.input.iter() {
- if inp.sequence != 0xffffffff {
+ if inp.sequence != Sequence::MAX {
panic!("We should never broadcast a transaction before its locktime ({})!", tx.lock_time);
}
}
self.chan_upds_recvd.fetch_add(1, Ordering::AcqRel);
Err(msgs::LightningError { err: "".to_owned(), action: msgs::ErrorAction::IgnoreError })
}
- fn get_next_channel_announcements(&self, starting_point: u64, batch_amount: u8) -> Vec<(msgs::ChannelAnnouncement, Option<msgs::ChannelUpdate>, Option<msgs::ChannelUpdate>)> {
- let mut chan_anns = Vec::new();
- const TOTAL_UPDS: u64 = 50;
- let end: u64 = cmp::min(starting_point + batch_amount as u64, TOTAL_UPDS);
- for i in starting_point..end {
- let chan_upd_1 = get_dummy_channel_update(i);
- let chan_upd_2 = get_dummy_channel_update(i);
- let chan_ann = get_dummy_channel_announcement(i);
- chan_anns.push((chan_ann, Some(chan_upd_1), Some(chan_upd_2)));
- }
-
- chan_anns
+ fn get_next_channel_announcement(&self, starting_point: u64) -> Option<(msgs::ChannelAnnouncement, Option<msgs::ChannelUpdate>, Option<msgs::ChannelUpdate>)> {
+ let chan_upd_1 = get_dummy_channel_update(starting_point);
+ let chan_upd_2 = get_dummy_channel_update(starting_point);
+ let chan_ann = get_dummy_channel_announcement(starting_point);
+ Some((chan_ann, Some(chan_upd_1), Some(chan_upd_2)))
}
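// Matching caller-side sketch: the returned announcement's short_channel_id
// plus one becomes the next starting point, replacing the old batch_amount
// parameter (`handler` is a hypothetical RoutingMessageHandler):
let mut starting_point = 0u64;
while let Some((chan_ann, upd_1, upd_2)) = handler.get_next_channel_announcement(starting_point) {
    starting_point = chan_ann.contents.short_channel_id + 1;
    // ... forward chan_ann, upd_1, and upd_2 to the peer ...
}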
- fn get_next_node_announcements(&self, _starting_point: Option<&PublicKey>, _batch_amount: u8) -> Vec<msgs::NodeAnnouncement> {
- Vec::new()
+ fn get_next_node_announcement(&self, _starting_point: Option<&PublicKey>) -> Option<msgs::NodeAnnouncement> {
+ None
}
fn peer_connected(&self, their_node_id: &PublicKey, init_msg: &msgs::Init) {
fn get_node_secret(&self, recipient: Recipient) -> Result<SecretKey, ()> {
self.backing.get_node_secret(recipient)
}
- fn ecdh(&self, recipient: Recipient, other_key: &PublicKey, tweak: Option<&[u8; 32]>) -> Result<SharedSecret, ()> {
+ fn ecdh(&self, recipient: Recipient, other_key: &PublicKey, tweak: Option<&Scalar>) -> Result<SharedSecret, ()> {
self.backing.ecdh(recipient, other_key, tweak)
}
fn get_inbound_payment_key_material(&self) -> keysinterface::KeyMaterial {
pub utxo_ret: Mutex<Result<TxOut, chain::AccessError>>,
pub watched_txn: Mutex<HashSet<(Txid, Script)>>,
pub watched_outputs: Mutex<HashSet<(OutPoint, Script)>>,
- expectations: Mutex<Option<VecDeque<OnRegisterOutput>>>,
}
impl TestChainSource {
utxo_ret: Mutex::new(Ok(TxOut { value: u64::max_value(), script_pubkey })),
watched_txn: Mutex::new(HashSet::new()),
watched_outputs: Mutex::new(HashSet::new()),
- expectations: Mutex::new(None),
}
}
-
- /// Sets an expectation that [`chain::Filter::register_output`] is called.
- pub fn expect(&self, expectation: OnRegisterOutput) -> &Self {
- self.expectations.lock().unwrap()
- .get_or_insert_with(|| VecDeque::new())
- .push_back(expectation);
- self
- }
}
impl chain::Access for TestChainSource {
self.watched_txn.lock().unwrap().insert((*txid, script_pubkey.clone()));
}
- fn register_output(&self, output: WatchedOutput) -> Option<(usize, Transaction)> {
- let dependent_tx = match &mut *self.expectations.lock().unwrap() {
- None => None,
- Some(expectations) => match expectations.pop_front() {
- None => {
- panic!("Unexpected register_output: {:?}",
- (output.outpoint, output.script_pubkey));
- },
- Some(expectation) => {
- assert_eq!(output.outpoint, expectation.outpoint());
- assert_eq!(&output.script_pubkey, expectation.script_pubkey());
- expectation.returns
- },
- },
- };
-
+ fn register_output(&self, output: WatchedOutput) {
self.watched_outputs.lock().unwrap().insert((output.outpoint, output.script_pubkey));
- dependent_tx
}
}
if panicking() {
return;
}
-
- if let Some(expectations) = &*self.expectations.lock().unwrap() {
- if !expectations.is_empty() {
- panic!("Unsatisfied expectations: {:?}", expectations);
- }
- }
- }
-}
-
-/// An expectation that [`chain::Filter::register_output`] was called with a transaction output and
-/// returns an optional dependent transaction that spends the output in the same block.
-pub struct OnRegisterOutput {
- /// The transaction output to register.
- pub with: TxOutReference,
-
- /// A dependent transaction spending the output along with its position in the block.
- pub returns: Option<(usize, Transaction)>,
-}
-
-/// A transaction output as identified by an index into a transaction's output list.
-pub struct TxOutReference(pub Transaction, pub usize);
-
-impl OnRegisterOutput {
- fn outpoint(&self) -> OutPoint {
- let txid = self.with.0.txid();
- let index = self.with.1 as u16;
- OutPoint { txid, index }
- }
-
- fn script_pubkey(&self) -> &Script {
- let index = self.with.1;
- &self.with.0.output[index].script_pubkey
- }
-}
-
-impl core::fmt::Debug for OnRegisterOutput {
- fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
- f.debug_struct("OnRegisterOutput")
- .field("outpoint", &self.outpoint())
- .field("script_pubkey", self.script_pubkey())
- .finish()
}
}
}
fn duration_since(&self, earlier: Self) -> Duration {
- self.duration_since(earlier)
+ // On Rust prior to 1.60, `Instant::duration_since` will panic if time goes backwards.
+ // However, we support Rust versions prior to 1.60 and some users appear to have "monotonic
+ // clocks" that go backwards in practice (likely relatively ancient kernels/etc). Thus, we
+ // manually check for time going backwards here and return a duration of zero in that case.
+ let now = Self::now();
+ if now > earlier { now - earlier } else { Duration::from_secs(0) }
}
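// Behavioural sketch: the manual comparison makes duration_since total. On
// toolchains where it is available this matches checked_duration_since:
use std::time::{Duration, Instant};
let now = Instant::now();
let later = now + Duration::from_secs(1);
assert_eq!(now.checked_duration_since(later).unwrap_or_default(), Duration::from_secs(0));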
fn duration_since_epoch() -> Duration {
weight_with_change += (VarInt(tx.output.len() as u64 + 1).len() - VarInt(tx.output.len() as u64).len()) as i64 * 4;
// When calculating weight, add two for the flag bytes
let change_value: i64 = (input_value - output_value) as i64 - weight_with_change * feerate_sat_per_1000_weight as i64 / 1000;
- if change_value >= dust_value.as_sat() as i64 {
+ if change_value >= dust_value.to_sat() as i64 {
change_output.value = change_value as u64;
tx.output.push(change_output);
Ok(weight_with_change as usize)
use bitcoin::blockdata::script::{Script, Builder};
use bitcoin::hash_types::{PubkeyHash, Txid};
- use bitcoin::hashes::sha256d::Hash as Sha256dHash;
use bitcoin::hashes::Hash;
- use bitcoin::Witness;
+ use bitcoin::{PackedLockTime, Sequence, Witness};
use hex::decode;
#[test]
fn test_tx_value_overrun() {
// If we have a bogus input amount or outputs valued more than inputs, we should fail
- let mut tx = Transaction { version: 2, lock_time: 0, input: Vec::new(), output: vec![TxOut {
+ let mut tx = Transaction { version: 2, lock_time: PackedLockTime::ZERO, input: Vec::new(), output: vec![TxOut {
script_pubkey: Script::new(), value: 1000
}] };
assert!(maybe_add_change_output(&mut tx, 21_000_000_0000_0001, 0, 253, Script::new()).is_err());
#[test]
fn test_tx_change_edge() {
// Check that we never add dust outputs
- let mut tx = Transaction { version: 2, lock_time: 0, input: Vec::new(), output: Vec::new() };
+ let mut tx = Transaction { version: 2, lock_time: PackedLockTime::ZERO, input: Vec::new(), output: Vec::new() };
let orig_wtxid = tx.wtxid();
let output_spk = Script::new_p2pkh(&PubkeyHash::hash(&[0; 0]));
- assert_eq!(output_spk.dust_value().as_sat(), 546);
+ assert_eq!(output_spk.dust_value().to_sat(), 546);
// 9 sats isn't enough to pay fee on a dummy transaction...
assert_eq!(tx.weight() as u64, 40); // ie 10 vbytes
assert!(maybe_add_change_output(&mut tx, 9, 0, 250, output_spk.clone()).is_err());
#[test]
fn test_tx_extra_outputs() {
// Check that we correctly handle existing outputs
- let mut tx = Transaction { version: 2, lock_time: 0, input: vec![TxIn {
- previous_output: OutPoint::new(Txid::from_hash(Sha256dHash::default()), 0), script_sig: Script::new(), witness: Witness::new(), sequence: 0,
+ let mut tx = Transaction { version: 2, lock_time: PackedLockTime::ZERO, input: vec![TxIn {
+ previous_output: OutPoint::new(Txid::all_zeros(), 0), script_sig: Script::new(), witness: Witness::new(), sequence: Sequence::ZERO,
}], output: vec![TxOut {
script_pubkey: Builder::new().push_int(1).into_script(), value: 1000
}] };
let orig_weight = tx.weight();
assert_eq!(orig_weight / 4, 61);
- assert_eq!(Builder::new().push_int(2).into_script().dust_value().as_sat(), 474);
+ assert_eq!(Builder::new().push_int(2).into_script().dust_value().to_sat(), 474);
// Input value of the output value + fee - 1 should fail:
assert!(maybe_add_change_output(&mut tx, 1000 + 61 + 100 - 1, 400, 250, Builder::new().push_int(2).into_script()).is_err());
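// Consolidated fixture sketch for the bitcoin 0.29 changes running through this
// hunk: lock_time and sequence are typed wrappers, Txid::all_zeros() (via the
// already-imported Hash trait) replaces hashing a default Sha256dHash, and
// Amount getters are spelled to_sat():
let tx = Transaction {
    version: 2,
    lock_time: PackedLockTime::ZERO,
    input: vec![TxIn {
        previous_output: OutPoint::new(Txid::all_zeros(), 0),
        script_sig: Script::new(),
        sequence: Sequence::ZERO,
        witness: Witness::new(),
    }],
    output: Vec::new(),
};
assert_eq!(tx.lock_time.0, 0);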