Updated `ClosureReason::HolderForceClosed` with whether txn was broadcasted.
find . -name '*.rs' -type f |sort >$TMP_FILE
for file in $(comm -23 $TMP_FILE rustfmt_excluded_files); do
echo "Checking formatting of $file"
- rustfmt $VERS --check $file
+ rustfmt $VERS --edition 2021 --check $file
done
if let Ok(s) = std::str::from_utf8(data) {
let first_decoding = base32::Alphabet::RFC4648 { padding: true }.decode(s);
if let Ok(first_decoding) = first_decoding {
- let encoding_response = base32::Alphabet::RFC4648 { padding: true }.encode(&first_decoding);
+ let encoding_response =
+ base32::Alphabet::RFC4648 { padding: true }.encode(&first_decoding);
assert_eq!(encoding_response, s.to_ascii_uppercase());
- let second_decoding = base32::Alphabet::RFC4648 { padding: true }.decode(&encoding_response).unwrap();
+ let second_decoding =
+ base32::Alphabet::RFC4648 { padding: true }.decode(&encoding_response).unwrap();
assert_eq!(first_decoding, second_decoding);
}
}
if let Ok(s) = std::str::from_utf8(data) {
let first_decoding = base32::Alphabet::RFC4648 { padding: false }.decode(s);
if let Ok(first_decoding) = first_decoding {
- let encoding_response = base32::Alphabet::RFC4648 { padding: false }.encode(&first_decoding);
+ let encoding_response =
+ base32::Alphabet::RFC4648 { padding: false }.encode(&first_decoding);
assert_eq!(encoding_response, s.to_ascii_uppercase());
- let second_decoding = base32::Alphabet::RFC4648 { padding: false }.decode(&encoding_response).unwrap();
+ let second_decoding =
+ base32::Alphabet::RFC4648 { padding: false }.decode(&encoding_response).unwrap();
assert_eq!(first_decoding, second_decoding);
}
}
-
+
let encode_response = base32::Alphabet::RFC4648 { padding: false }.encode(&data);
- let decode_response = base32::Alphabet::RFC4648 { padding: false }.decode(&encode_response).unwrap();
+ let decode_response =
+ base32::Alphabet::RFC4648 { padding: false }.decode(&encode_response).unwrap();
assert_eq!(data, decode_response);
let encode_response = base32::Alphabet::RFC4648 { padding: true }.encode(&data);
- let decode_response = base32::Alphabet::RFC4648 { padding: true }.decode(&encode_response).unwrap();
+ let decode_response =
+ base32::Alphabet::RFC4648 { padding: true }.decode(&encode_response).unwrap();
assert_eq!(data, decode_response);
}
// To modify it, modify target_template.txt and run gen_target.sh instead.
#![cfg_attr(feature = "libfuzzer_fuzz", no_main)]
+#![cfg_attr(rustfmt, rustfmt_skip)]
#[cfg(not(fuzzing))]
compile_error!("Fuzz targets need cfg=fuzzing");
// To modify it, modify target_template.txt and run gen_target.sh instead.
#![cfg_attr(feature = "libfuzzer_fuzz", no_main)]
+#![cfg_attr(rustfmt, rustfmt_skip)]
#[cfg(not(fuzzing))]
compile_error!("Fuzz targets need cfg=fuzzing");
// To modify it, modify target_template.txt and run gen_target.sh instead.
#![cfg_attr(feature = "libfuzzer_fuzz", no_main)]
+#![cfg_attr(rustfmt, rustfmt_skip)]
#[cfg(not(fuzzing))]
compile_error!("Fuzz targets need cfg=fuzzing");
// To modify it, modify target_template.txt and run gen_target.sh instead.
#![cfg_attr(feature = "libfuzzer_fuzz", no_main)]
+#![cfg_attr(rustfmt, rustfmt_skip)]
#[cfg(not(fuzzing))]
compile_error!("Fuzz targets need cfg=fuzzing");
// To modify it, modify target_template.txt and run gen_target.sh instead.
#![cfg_attr(feature = "libfuzzer_fuzz", no_main)]
+#![cfg_attr(rustfmt, rustfmt_skip)]
#[cfg(not(fuzzing))]
compile_error!("Fuzz targets need cfg=fuzzing");
// To modify it, modify target_template.txt and run gen_target.sh instead.
#![cfg_attr(feature = "libfuzzer_fuzz", no_main)]
+#![cfg_attr(rustfmt, rustfmt_skip)]
#[cfg(not(fuzzing))]
compile_error!("Fuzz targets need cfg=fuzzing");
// To modify it, modify target_template.txt and run gen_target.sh instead.
#![cfg_attr(feature = "libfuzzer_fuzz", no_main)]
+#![cfg_attr(rustfmt, rustfmt_skip)]
#[cfg(not(fuzzing))]
compile_error!("Fuzz targets need cfg=fuzzing");
// To modify it, modify target_template.txt and run gen_target.sh instead.
#![cfg_attr(feature = "libfuzzer_fuzz", no_main)]
+#![cfg_attr(rustfmt, rustfmt_skip)]
#[cfg(not(fuzzing))]
compile_error!("Fuzz targets need cfg=fuzzing");
// To modify it, modify target_template.txt and run gen_target.sh instead.
#![cfg_attr(feature = "libfuzzer_fuzz", no_main)]
+#![cfg_attr(rustfmt, rustfmt_skip)]
#[cfg(not(fuzzing))]
compile_error!("Fuzz targets need cfg=fuzzing");
// To modify it, modify target_template.txt and run gen_target.sh instead.
#![cfg_attr(feature = "libfuzzer_fuzz", no_main)]
+#![cfg_attr(rustfmt, rustfmt_skip)]
#[cfg(not(fuzzing))]
compile_error!("Fuzz targets need cfg=fuzzing");
// To modify it, modify target_template.txt and run gen_target.sh instead.
#![cfg_attr(feature = "libfuzzer_fuzz", no_main)]
+#![cfg_attr(rustfmt, rustfmt_skip)]
#[cfg(not(fuzzing))]
compile_error!("Fuzz targets need cfg=fuzzing");
// To modify it, modify target_template.txt and run gen_target.sh instead.
#![cfg_attr(feature = "libfuzzer_fuzz", no_main)]
+#![cfg_attr(rustfmt, rustfmt_skip)]
#[cfg(not(fuzzing))]
compile_error!("Fuzz targets need cfg=fuzzing");
// To modify it, modify target_template.txt and run gen_target.sh instead.
#![cfg_attr(feature = "libfuzzer_fuzz", no_main)]
+#![cfg_attr(rustfmt, rustfmt_skip)]
#[cfg(not(fuzzing))]
compile_error!("Fuzz targets need cfg=fuzzing");
// To modify it, modify target_template.txt and run gen_target.sh instead.
#![cfg_attr(feature = "libfuzzer_fuzz", no_main)]
+#![cfg_attr(rustfmt, rustfmt_skip)]
#[cfg(not(fuzzing))]
compile_error!("Fuzz targets need cfg=fuzzing");
// To modify it, modify target_template.txt and run gen_target.sh instead.
#![cfg_attr(feature = "libfuzzer_fuzz", no_main)]
+#![cfg_attr(rustfmt, rustfmt_skip)]
#[cfg(not(fuzzing))]
compile_error!("Fuzz targets need cfg=fuzzing");
// To modify it, modify target_template.txt and run gen_target.sh instead.
#![cfg_attr(feature = "libfuzzer_fuzz", no_main)]
+#![cfg_attr(rustfmt, rustfmt_skip)]
#[cfg(not(fuzzing))]
compile_error!("Fuzz targets need cfg=fuzzing");
// To modify it, modify target_template.txt and run gen_target.sh instead.
#![cfg_attr(feature = "libfuzzer_fuzz", no_main)]
+#![cfg_attr(rustfmt, rustfmt_skip)]
#[cfg(not(fuzzing))]
compile_error!("Fuzz targets need cfg=fuzzing");
// To modify it, modify target_template.txt and run gen_target.sh instead.
#![cfg_attr(feature = "libfuzzer_fuzz", no_main)]
+#![cfg_attr(rustfmt, rustfmt_skip)]
#[cfg(not(fuzzing))]
compile_error!("Fuzz targets need cfg=fuzzing");
// To modify it, modify target_template.txt and run gen_target.sh instead.
#![cfg_attr(feature = "libfuzzer_fuzz", no_main)]
+#![cfg_attr(rustfmt, rustfmt_skip)]
#[cfg(not(fuzzing))]
compile_error!("Fuzz targets need cfg=fuzzing");
// To modify it, modify target_template.txt and run gen_target.sh instead.
#![cfg_attr(feature = "libfuzzer_fuzz", no_main)]
+#![cfg_attr(rustfmt, rustfmt_skip)]
#[cfg(not(fuzzing))]
compile_error!("Fuzz targets need cfg=fuzzing");
// To modify it, modify target_template.txt and run gen_target.sh instead.
#![cfg_attr(feature = "libfuzzer_fuzz", no_main)]
+#![cfg_attr(rustfmt, rustfmt_skip)]
#[cfg(not(fuzzing))]
compile_error!("Fuzz targets need cfg=fuzzing");
// To modify it, modify target_template.txt and run gen_target.sh instead.
#![cfg_attr(feature = "libfuzzer_fuzz", no_main)]
+#![cfg_attr(rustfmt, rustfmt_skip)]
#[cfg(not(fuzzing))]
compile_error!("Fuzz targets need cfg=fuzzing");
// To modify it, modify target_template.txt and run gen_target.sh instead.
#![cfg_attr(feature = "libfuzzer_fuzz", no_main)]
+#![cfg_attr(rustfmt, rustfmt_skip)]
#[cfg(not(fuzzing))]
compile_error!("Fuzz targets need cfg=fuzzing");
// To modify it, modify target_template.txt and run gen_target.sh instead.
#![cfg_attr(feature = "libfuzzer_fuzz", no_main)]
+#![cfg_attr(rustfmt, rustfmt_skip)]
#[cfg(not(fuzzing))]
compile_error!("Fuzz targets need cfg=fuzzing");
// To modify it, modify target_template.txt and run gen_target.sh instead.
#![cfg_attr(feature = "libfuzzer_fuzz", no_main)]
+#![cfg_attr(rustfmt, rustfmt_skip)]
#[cfg(not(fuzzing))]
compile_error!("Fuzz targets need cfg=fuzzing");
// To modify it, modify target_template.txt and run gen_target.sh instead.
#![cfg_attr(feature = "libfuzzer_fuzz", no_main)]
+#![cfg_attr(rustfmt, rustfmt_skip)]
#[cfg(not(fuzzing))]
compile_error!("Fuzz targets need cfg=fuzzing");
// To modify it, modify target_template.txt and run gen_target.sh instead.
#![cfg_attr(feature = "libfuzzer_fuzz", no_main)]
+#![cfg_attr(rustfmt, rustfmt_skip)]
#[cfg(not(fuzzing))]
compile_error!("Fuzz targets need cfg=fuzzing");
// To modify it, modify target_template.txt and run gen_target.sh instead.
#![cfg_attr(feature = "libfuzzer_fuzz", no_main)]
+#![cfg_attr(rustfmt, rustfmt_skip)]
#[cfg(not(fuzzing))]
compile_error!("Fuzz targets need cfg=fuzzing");
// To modify it, modify target_template.txt and run gen_target.sh instead.
#![cfg_attr(feature = "libfuzzer_fuzz", no_main)]
+#![cfg_attr(rustfmt, rustfmt_skip)]
#[cfg(not(fuzzing))]
compile_error!("Fuzz targets need cfg=fuzzing");
// To modify it, modify target_template.txt and run gen_target.sh instead.
#![cfg_attr(feature = "libfuzzer_fuzz", no_main)]
+#![cfg_attr(rustfmt, rustfmt_skip)]
#[cfg(not(fuzzing))]
compile_error!("Fuzz targets need cfg=fuzzing");
// To modify it, modify target_template.txt and run gen_target.sh instead.
#![cfg_attr(feature = "libfuzzer_fuzz", no_main)]
+#![cfg_attr(rustfmt, rustfmt_skip)]
#[cfg(not(fuzzing))]
compile_error!("Fuzz targets need cfg=fuzzing");
// To modify it, modify target_template.txt and run gen_target.sh instead.
#![cfg_attr(feature = "libfuzzer_fuzz", no_main)]
+#![cfg_attr(rustfmt, rustfmt_skip)]
#[cfg(not(fuzzing))]
compile_error!("Fuzz targets need cfg=fuzzing");
// To modify it, modify target_template.txt and run gen_target.sh instead.
#![cfg_attr(feature = "libfuzzer_fuzz", no_main)]
+#![cfg_attr(rustfmt, rustfmt_skip)]
#[cfg(not(fuzzing))]
compile_error!("Fuzz targets need cfg=fuzzing");
// To modify it, modify target_template.txt and run gen_target.sh instead.
#![cfg_attr(feature = "libfuzzer_fuzz", no_main)]
+#![cfg_attr(rustfmt, rustfmt_skip)]
#[cfg(not(fuzzing))]
compile_error!("Fuzz targets need cfg=fuzzing");
// To modify it, modify target_template.txt and run gen_target.sh instead.
#![cfg_attr(feature = "libfuzzer_fuzz", no_main)]
+#![cfg_attr(rustfmt, rustfmt_skip)]
#[cfg(not(fuzzing))]
compile_error!("Fuzz targets need cfg=fuzzing");
// To modify it, modify target_template.txt and run gen_target.sh instead.
#![cfg_attr(feature = "libfuzzer_fuzz", no_main)]
+#![cfg_attr(rustfmt, rustfmt_skip)]
#[cfg(not(fuzzing))]
compile_error!("Fuzz targets need cfg=fuzzing");
// To modify it, modify target_template.txt and run gen_target.sh instead.
#![cfg_attr(feature = "libfuzzer_fuzz", no_main)]
+#![cfg_attr(rustfmt, rustfmt_skip)]
#[cfg(not(fuzzing))]
compile_error!("Fuzz targets need cfg=fuzzing");
// To modify it, modify target_template.txt and run gen_target.sh instead.
#![cfg_attr(feature = "libfuzzer_fuzz", no_main)]
+#![cfg_attr(rustfmt, rustfmt_skip)]
#[cfg(not(fuzzing))]
compile_error!("Fuzz targets need cfg=fuzzing");
// To modify it, modify target_template.txt and run gen_target.sh instead.
#![cfg_attr(feature = "libfuzzer_fuzz", no_main)]
+#![cfg_attr(rustfmt, rustfmt_skip)]
#[cfg(not(fuzzing))]
compile_error!("Fuzz targets need cfg=fuzzing");
// To modify it, modify target_template.txt and run gen_target.sh instead.
#![cfg_attr(feature = "libfuzzer_fuzz", no_main)]
+#![cfg_attr(rustfmt, rustfmt_skip)]
#[cfg(not(fuzzing))]
compile_error!("Fuzz targets need cfg=fuzzing");
// To modify it, modify target_template.txt and run gen_target.sh instead.
#![cfg_attr(feature = "libfuzzer_fuzz", no_main)]
+#![cfg_attr(rustfmt, rustfmt_skip)]
#[cfg(not(fuzzing))]
compile_error!("Fuzz targets need cfg=fuzzing");
// To modify it, modify target_template.txt and run gen_target.sh instead.
#![cfg_attr(feature = "libfuzzer_fuzz", no_main)]
+#![cfg_attr(rustfmt, rustfmt_skip)]
#[cfg(not(fuzzing))]
compile_error!("Fuzz targets need cfg=fuzzing");
// To modify it, modify target_template.txt and run gen_target.sh instead.
#![cfg_attr(feature = "libfuzzer_fuzz", no_main)]
+#![cfg_attr(rustfmt, rustfmt_skip)]
#[cfg(not(fuzzing))]
compile_error!("Fuzz targets need cfg=fuzzing");
// To modify it, modify target_template.txt and run gen_target.sh instead.
#![cfg_attr(feature = "libfuzzer_fuzz", no_main)]
+#![cfg_attr(rustfmt, rustfmt_skip)]
#[cfg(not(fuzzing))]
compile_error!("Fuzz targets need cfg=fuzzing");
// To modify it, modify target_template.txt and run gen_target.sh instead.
#![cfg_attr(feature = "libfuzzer_fuzz", no_main)]
+#![cfg_attr(rustfmt, rustfmt_skip)]
#[cfg(not(fuzzing))]
compile_error!("Fuzz targets need cfg=fuzzing");
// To modify it, modify target_template.txt and run gen_target.sh instead.
#![cfg_attr(feature = "libfuzzer_fuzz", no_main)]
+#![cfg_attr(rustfmt, rustfmt_skip)]
#[cfg(not(fuzzing))]
compile_error!("Fuzz targets need cfg=fuzzing");
// To modify it, modify target_template.txt and run gen_target.sh instead.
#![cfg_attr(feature = "libfuzzer_fuzz", no_main)]
+#![cfg_attr(rustfmt, rustfmt_skip)]
#[cfg(not(fuzzing))]
compile_error!("Fuzz targets need cfg=fuzzing");
// To modify it, modify target_template.txt and run gen_target.sh instead.
#![cfg_attr(feature = "libfuzzer_fuzz", no_main)]
+#![cfg_attr(rustfmt, rustfmt_skip)]
#[cfg(not(fuzzing))]
compile_error!("Fuzz targets need cfg=fuzzing");
// To modify it, modify target_template.txt and run gen_target.sh instead.
#![cfg_attr(feature = "libfuzzer_fuzz", no_main)]
+#![cfg_attr(rustfmt, rustfmt_skip)]
#[cfg(not(fuzzing))]
compile_error!("Fuzz targets need cfg=fuzzing");
// To modify it, modify target_template.txt and run gen_target.sh instead.
#![cfg_attr(feature = "libfuzzer_fuzz", no_main)]
+#![cfg_attr(rustfmt, rustfmt_skip)]
#[cfg(not(fuzzing))]
compile_error!("Fuzz targets need cfg=fuzzing");
// To modify it, modify target_template.txt and run gen_target.sh instead.
#![cfg_attr(feature = "libfuzzer_fuzz", no_main)]
+#![cfg_attr(rustfmt, rustfmt_skip)]
#[cfg(not(fuzzing))]
compile_error!("Fuzz targets need cfg=fuzzing");
// To modify it, modify target_template.txt and run gen_target.sh instead.
#![cfg_attr(feature = "libfuzzer_fuzz", no_main)]
+#![cfg_attr(rustfmt, rustfmt_skip)]
#[cfg(not(fuzzing))]
compile_error!("Fuzz targets need cfg=fuzzing");
// To modify it, modify target_template.txt and run gen_target.sh instead.
#![cfg_attr(feature = "libfuzzer_fuzz", no_main)]
+#![cfg_attr(rustfmt, rustfmt_skip)]
#[cfg(not(fuzzing))]
compile_error!("Fuzz targets need cfg=fuzzing");
// To modify it, modify target_template.txt and run gen_target.sh instead.
#![cfg_attr(feature = "libfuzzer_fuzz", no_main)]
+#![cfg_attr(rustfmt, rustfmt_skip)]
#[cfg(not(fuzzing))]
compile_error!("Fuzz targets need cfg=fuzzing");
// To modify it, modify target_template.txt and run gen_target.sh instead.
#![cfg_attr(feature = "libfuzzer_fuzz", no_main)]
+#![cfg_attr(rustfmt, rustfmt_skip)]
#[cfg(not(fuzzing))]
compile_error!("Fuzz targets need cfg=fuzzing");
// To modify it, modify target_template.txt and run gen_target.sh instead.
#![cfg_attr(feature = "libfuzzer_fuzz", no_main)]
+#![cfg_attr(rustfmt, rustfmt_skip)]
#[cfg(not(fuzzing))]
compile_error!("Fuzz targets need cfg=fuzzing");
// To modify it, modify target_template.txt and run gen_target.sh instead.
#![cfg_attr(feature = "libfuzzer_fuzz", no_main)]
+#![cfg_attr(rustfmt, rustfmt_skip)]
#[cfg(not(fuzzing))]
compile_error!("Fuzz targets need cfg=fuzzing");
// To modify it, modify target_template.txt and run gen_target.sh instead.
#![cfg_attr(feature = "libfuzzer_fuzz", no_main)]
+#![cfg_attr(rustfmt, rustfmt_skip)]
#[cfg(not(fuzzing))]
compile_error!("Fuzz targets need cfg=fuzzing");
// To modify it, modify target_template.txt and run gen_target.sh instead.
#![cfg_attr(feature = "libfuzzer_fuzz", no_main)]
+#![cfg_attr(rustfmt, rustfmt_skip)]
#[cfg(not(fuzzing))]
compile_error!("Fuzz targets need cfg=fuzzing");
// To modify it, modify target_template.txt and run gen_target.sh instead.
#![cfg_attr(feature = "libfuzzer_fuzz", no_main)]
+#![cfg_attr(rustfmt, rustfmt_skip)]
#[cfg(not(fuzzing))]
compile_error!("Fuzz targets need cfg=fuzzing");
// To modify it, modify target_template.txt and run gen_target.sh instead.
#![cfg_attr(feature = "libfuzzer_fuzz", no_main)]
+#![cfg_attr(rustfmt, rustfmt_skip)]
#[cfg(not(fuzzing))]
compile_error!("Fuzz targets need cfg=fuzzing");
// To modify it, modify target_template.txt and run gen_target.sh instead.
#![cfg_attr(feature = "libfuzzer_fuzz", no_main)]
+#![cfg_attr(rustfmt, rustfmt_skip)]
#[cfg(not(fuzzing))]
compile_error!("Fuzz targets need cfg=fuzzing");
// To modify it, modify target_template.txt and run gen_target.sh instead.
#![cfg_attr(feature = "libfuzzer_fuzz", no_main)]
+#![cfg_attr(rustfmt, rustfmt_skip)]
#[cfg(not(fuzzing))]
compile_error!("Fuzz targets need cfg=fuzzing");
// To modify it, modify target_template.txt and run gen_target.sh instead.
#![cfg_attr(feature = "libfuzzer_fuzz", no_main)]
+#![cfg_attr(rustfmt, rustfmt_skip)]
#[cfg(not(fuzzing))]
compile_error!("Fuzz targets need cfg=fuzzing");
use bitcoin::hash_types::BlockHash;
use lightning::blinded_path::BlindedPath;
-use lightning::blinded_path::message::ForwardNode;
use lightning::blinded_path::payment::ReceiveTlvs;
use lightning::chain;
use lightning::chain::{BestBlock, ChannelMonitorUpdateStatus, chainmonitor, channelmonitor, Confirm, Watch};
}
fn create_blinded_paths<T: secp256k1::Signing + secp256k1::Verification>(
- &self, _recipient: PublicKey, _peers: Vec<ForwardNode>, _secp_ctx: &Secp256k1<T>,
+ &self, _recipient: PublicKey, _peers: Vec<PublicKey>, _secp_ctx: &Secp256k1<T>,
) -> Result<Vec<BlindedPath>, ()> {
unreachable!()
}
use bitcoin::hash_types::BlockHash;
use lightning::chain::channelmonitor;
+use lightning::util::ser::{ReadableArgs, Writeable, Writer};
use lightning::util::test_channel_signer::TestChannelSigner;
-use lightning::util::ser::{ReadableArgs, Writer, Writeable};
use lightning::util::test_utils::OnlyReadsKeysInterface;
use crate::utils::test_logger;
#[inline]
pub fn do_test<Out: test_logger::Output>(data: &[u8], _out: Out) {
- if let Ok((latest_block_hash, monitor)) = <(BlockHash, channelmonitor::ChannelMonitor<TestChannelSigner>)>::read(&mut Cursor::new(data), (&OnlyReadsKeysInterface {}, &OnlyReadsKeysInterface {})) {
+ if let Ok((latest_block_hash, monitor)) =
+ <(BlockHash, channelmonitor::ChannelMonitor<TestChannelSigner>)>::read(
+ &mut Cursor::new(data),
+ (&OnlyReadsKeysInterface {}, &OnlyReadsKeysInterface {}),
+ ) {
let mut w = VecWriter(Vec::new());
monitor.write(&mut w).unwrap();
- let deserialized_copy = <(BlockHash, channelmonitor::ChannelMonitor<TestChannelSigner>)>::read(&mut Cursor::new(&w.0), (&OnlyReadsKeysInterface {}, &OnlyReadsKeysInterface {})).unwrap();
+ let deserialized_copy =
+ <(BlockHash, channelmonitor::ChannelMonitor<TestChannelSigner>)>::read(
+ &mut Cursor::new(&w.0),
+ (&OnlyReadsKeysInterface {}, &OnlyReadsKeysInterface {}),
+ )
+ .unwrap();
assert!(latest_block_hash == deserialized_copy.0);
assert!(monitor == deserialized_copy.1);
}
#[no_mangle]
pub extern "C" fn chanmon_deser_run(data: *const u8, datalen: usize) {
- do_test(unsafe { std::slice::from_raw_parts(data, datalen) }, test_logger::DevNull{});
+ do_test(unsafe { std::slice::from_raw_parts(data, datalen) }, test_logger::DevNull {});
}
// You may not use this file except in accordance with one or both of these
// licenses.
-use lightning::ln::msgs::SocketAddress;
use core::str::FromStr;
+use lightning::ln::msgs::SocketAddress;
use crate::utils::test_logger;
if let Ok(s) = std::str::from_utf8(data) {
let _ = SocketAddress::from_str(s);
}
-
}
pub fn fromstr_to_netaddress_test<Out: test_logger::Output>(data: &[u8], _out: Out) {
pub extern "C" fn fromstr_to_netaddress_run(data: *const u8, datalen: usize) {
do_test(unsafe { std::slice::from_raw_parts(data, datalen) });
}
-
use bitcoin::hash_types::{Txid, BlockHash};
use lightning::blinded_path::BlindedPath;
-use lightning::blinded_path::message::ForwardNode;
use lightning::blinded_path::payment::ReceiveTlvs;
use lightning::chain;
use lightning::chain::{BestBlock, ChannelMonitorUpdateStatus, Confirm, Listen};
use std::convert::TryInto;
use std::cmp;
use std::sync::{Arc, Mutex};
-use std::sync::atomic::{AtomicU64,AtomicUsize,Ordering};
+use std::sync::atomic::{AtomicU64,AtomicUsize,AtomicBool,Ordering};
use bech32::u5;
#[inline]
struct InputData {
data: Vec<u8>,
read_pos: AtomicUsize,
+ halt_fee_est_reads: AtomicBool,
}
impl InputData {
fn get_slice(&self, len: usize) -> Option<&[u8]> {
}
impl FeeEstimator for FuzzEstimator {
fn get_est_sat_per_1000_weight(&self, _: ConfirmationTarget) -> u32 {
+ if self.input.halt_fee_est_reads.load(Ordering::Acquire) {
+ return 253;
+ }
//TODO: We should actually be testing at least much more than 64k...
match self.input.get_slice(2) {
Some(slice) => cmp::max(slice_to_be16(slice) as u32, 253),
}
fn create_blinded_paths<T: secp256k1::Signing + secp256k1::Verification>(
- &self, _recipient: PublicKey, _peers: Vec<ForwardNode>, _secp_ctx: &Secp256k1<T>,
+ &self, _recipient: PublicKey, _peers: Vec<PublicKey>, _secp_ctx: &Secp256k1<T>,
) -> Result<Vec<BlindedPath>, ()> {
unreachable!()
}
let input = Arc::new(InputData {
data: data.to_vec(),
read_pos: AtomicUsize::new(0),
+ halt_fee_est_reads: AtomicBool::new(false),
});
let fee_est = Arc::new(FuzzEstimator {
input: input.clone(),
11 => {
let mut txn = broadcast.txn_broadcasted.lock().unwrap().split_off(0);
if !txn.is_empty() {
+ input.halt_fee_est_reads.store(true, Ordering::Release);
loss_detector.connect_block(&txn[..]);
for _ in 2..100 {
loss_detector.connect_block(&[]);
}
+ input.halt_fee_est_reads.store(false, Ordering::Release);
}
for tx in txn.drain(..) {
loss_detector.funding_txn.push(tx);
ext_from_hex("0c005e", &mut test);
// the funding transaction
ext_from_hex("020000000100000000000000000000000000000000000000000000000000000000000000000000000000ffffffff0150c3000000000000220020ae0000000000000000000000000000000000000000000000000000000000000000000000", &mut test);
+ ext_from_hex("00fd00fd", &mut test); // Two feerate requests during block connection
// connect a block with no transactions, one per line
ext_from_hex("0c0000", &mut test);
+ ext_from_hex("00fd00fd", &mut test); // Two feerate requests during block connection
ext_from_hex("0c0000", &mut test);
+ ext_from_hex("00fd00fd", &mut test); // Two feerate requests during block connection
ext_from_hex("0c0000", &mut test);
+ ext_from_hex("00fd00fd", &mut test); // Two feerate requests during block connection
ext_from_hex("0c0000", &mut test);
+ ext_from_hex("00fd00fd", &mut test); // Two feerate requests during block connection
ext_from_hex("0c0000", &mut test);
+ ext_from_hex("00fd00fd", &mut test); // Two feerate requests during block connection
ext_from_hex("0c0000", &mut test);
+ ext_from_hex("00fd00fd", &mut test); // Two feerate requests during block connection
ext_from_hex("0c0000", &mut test);
+ ext_from_hex("00fd00fd", &mut test); // Two feerate requests during block connection
ext_from_hex("0c0000", &mut test);
+ ext_from_hex("00fd00fd", &mut test); // Two feerate requests during block connection
ext_from_hex("0c0000", &mut test);
+ ext_from_hex("00fd00fd", &mut test); // Two feerate requests during block connection
ext_from_hex("0c0000", &mut test);
+ ext_from_hex("00fd00fd", &mut test); // Two feerate requests during block connection
ext_from_hex("0c0000", &mut test);
+ ext_from_hex("00fd00fd", &mut test); // Two feerate requests during block connection
ext_from_hex("0c0000", &mut test);
+ ext_from_hex("00fd00fd", &mut test); // Two feerate requests during block connection
// by now client should have sent a channel_ready (CHECK 3: SendChannelReady to 03000000 for chan 3d000000)
// inbound read from peer id 0 of len 18
ext_from_hex("0c007d", &mut test);
// the commitment transaction for channel 3f00000000000000000000000000000000000000000000000000000000000000
ext_from_hex("02000000013a000000000000000000000000000000000000000000000000000000000000000000000000000000800258020000000000002200204b0000000000000000000000000000000000000000000000000000000000000014c0000000000000160014280000000000000000000000000000000000000005000020", &mut test);
+ ext_from_hex("00fd00fd", &mut test); // Two feerate requests during block connection
//
// connect a block with one transaction of len 94
ext_from_hex("0c005e", &mut test);
// the HTLC timeout transaction
ext_from_hex("0200000001730000000000000000000000000000000000000000000000000000000000000000000000000000000001a701000000000000220020b20000000000000000000000000000000000000000000000000000000000000000000000", &mut test);
+ ext_from_hex("00fd00fd", &mut test); // Two feerate requests during block connection
// connect a block with no transactions
ext_from_hex("0c0000", &mut test);
+ ext_from_hex("00fd00fd", &mut test); // Two feerate requests during block connection
// connect a block with no transactions
ext_from_hex("0c0000", &mut test);
+ ext_from_hex("00fd00fd", &mut test); // Two feerate requests during block connection
// connect a block with no transactions
ext_from_hex("0c0000", &mut test);
+ ext_from_hex("00fd00fd", &mut test); // Two feerate requests during block connection
// connect a block with no transactions
ext_from_hex("0c0000", &mut test);
+ ext_from_hex("00fd00fd", &mut test); // Two feerate requests during block connection
// connect a block with no transactions
ext_from_hex("0c0000", &mut test);
+ ext_from_hex("00fd00fd", &mut test); // Two feerate requests during block connection
// process the now-pending HTLC forward
ext_from_hex("07", &mut test);
// You may not use this file except in accordance with one or both of these
// licenses.
-use lightning::util::indexed_map::{IndexedMap, self};
-use std::collections::{BTreeMap, btree_map};
use lightning::util::hash_tables::*;
+use lightning::util::indexed_map::{self, IndexedMap};
+use std::collections::{btree_map, BTreeMap};
use crate::utils::test_logger;
-use std::ops::{RangeBounds, Bound};
+use std::ops::{Bound, RangeBounds};
struct ExclLowerInclUpper(u8, u8);
impl RangeBounds<u8> for ExclLowerInclUpper {
- fn start_bound(&self) -> Bound<&u8> { Bound::Excluded(&self.0) }
- fn end_bound(&self) -> Bound<&u8> { Bound::Included(&self.1) }
+ fn start_bound(&self) -> Bound<&u8> {
+ Bound::Excluded(&self.0)
+ }
+ fn end_bound(&self) -> Bound<&u8> {
+ Bound::Included(&self.1)
+ }
}
struct ExclLowerExclUpper(u8, u8);
impl RangeBounds<u8> for ExclLowerExclUpper {
- fn start_bound(&self) -> Bound<&u8> { Bound::Excluded(&self.0) }
- fn end_bound(&self) -> Bound<&u8> { Bound::Excluded(&self.1) }
+ fn start_bound(&self) -> Bound<&u8> {
+ Bound::Excluded(&self.0)
+ }
+ fn end_bound(&self) -> Bound<&u8> {
+ Bound::Excluded(&self.1)
+ }
}
fn check_eq(btree: &BTreeMap<u8, u8>, mut indexed: IndexedMap<u8, u8>) {
if let indexed_map::Entry::Occupied(mut io) = indexed_entry {
assert_eq!(bo.get(), io.get());
assert_eq!(bo.get_mut(), io.get_mut());
- } else { panic!(); }
+ } else {
+ panic!();
+ }
},
btree_map::Entry::Vacant(_) => {
if let indexed_map::Entry::Vacant(_) = indexed_entry {
- } else { panic!(); }
- }
+ } else {
+ panic!();
+ }
+ },
}
}
const STRIDE: u8 = 16;
for range_type in 0..4 {
- for k in 0..=255/STRIDE {
+ for k in 0..=255 / STRIDE {
let lower_bound = k * STRIDE;
let upper_bound = lower_bound + (STRIDE - 1);
- macro_rules! range { ($map: expr) => {
- match range_type {
- 0 => $map.range(lower_bound..upper_bound),
- 1 => $map.range(lower_bound..=upper_bound),
- 2 => $map.range(ExclLowerInclUpper(lower_bound, upper_bound)),
- 3 => $map.range(ExclLowerExclUpper(lower_bound, upper_bound)),
- _ => unreachable!(),
- }
- } }
+ macro_rules! range {
+ ($map: expr) => {
+ match range_type {
+ 0 => $map.range(lower_bound..upper_bound),
+ 1 => $map.range(lower_bound..=upper_bound),
+ 2 => $map.range(ExclLowerInclUpper(lower_bound, upper_bound)),
+ 3 => $map.range(ExclLowerExclUpper(lower_bound, upper_bound)),
+ _ => unreachable!(),
+ }
+ };
+ }
let mut btree_iter = range!(btree);
let mut indexed_iter = range!(indexed);
loop {
let b_v = btree_iter.next();
let i_v = indexed_iter.next();
assert_eq!(b_v, i_v);
- if b_v.is_none() { break; }
+ if b_v.is_none() {
+ break;
+ }
}
}
}
#[inline]
pub fn do_test(data: &[u8]) {
- if data.len() % 2 != 0 { return; }
+ if data.len() % 2 != 0 {
+ return;
+ }
let mut btree = BTreeMap::new();
let mut indexed = IndexedMap::new();
} else {
assert_eq!(bo.remove_entry(), io.remove_entry());
}
- } else { panic!(); }
+ } else {
+ panic!();
+ }
},
btree_map::Entry::Vacant(bv) => {
if let indexed_map::Entry::Vacant(iv) = indexed.entry(k) {
bv.insert(k);
iv.insert(k);
- } else { panic!(); }
+ } else {
+ panic!();
+ }
},
}
}
// You may not use this file except in accordance with one or both of these
// licenses.
-use bitcoin::secp256k1::{Keypair, Parity, PublicKey, Secp256k1, SecretKey, self};
use crate::utils::test_logger;
+use bitcoin::secp256k1::{self, Keypair, Parity, PublicKey, Secp256k1, SecretKey};
use core::convert::TryFrom;
-use lightning::blinded_path::BlindedPath;
use lightning::blinded_path::message::ForwardNode;
-use lightning::sign::EntropySource;
-use lightning::ln::PaymentHash;
+use lightning::blinded_path::BlindedPath;
use lightning::ln::features::BlindedHopFeatures;
+use lightning::ln::PaymentHash;
use lightning::offers::invoice::{BlindedPayInfo, UnsignedBolt12Invoice};
use lightning::offers::invoice_request::InvoiceRequest;
use lightning::offers::parse::Bolt12SemanticError;
+use lightning::sign::EntropySource;
use lightning::util::ser::Writeable;
#[inline]
let even_pubkey = x_only_pubkey.public_key(Parity::Even);
if signing_pubkey == odd_pubkey || signing_pubkey == even_pubkey {
unsigned_invoice
- .sign(|message: &UnsignedBolt12Invoice|
+ .sign(|message: &UnsignedBolt12Invoice| {
Ok(secp_ctx.sign_schnorr_no_aux_rand(message.as_ref().as_digest(), &keys))
- )
+ })
.unwrap()
.write(&mut buffer)
.unwrap();
} else {
unsigned_invoice
- .sign(|message: &UnsignedBolt12Invoice|
+ .sign(|message: &UnsignedBolt12Invoice| {
Ok(secp_ctx.sign_schnorr_no_aux_rand(message.as_ref().as_digest(), &keys))
- )
+ })
.unwrap_err();
}
}
struct Randomness;
impl EntropySource for Randomness {
- fn get_secure_random_bytes(&self) -> [u8; 32] { [42; 32] }
+ fn get_secure_random_bytes(&self) -> [u8; 32] {
+ [42; 32]
+ }
}
fn pubkey(byte: u8) -> PublicKey {
}
fn build_response<T: secp256k1::Signing + secp256k1::Verification>(
- invoice_request: &InvoiceRequest, secp_ctx: &Secp256k1<T>
+ invoice_request: &InvoiceRequest, secp_ctx: &Secp256k1<T>,
) -> Result<UnsignedBolt12Invoice, Bolt12SemanticError> {
let entropy_source = Randomness {};
let intermediate_nodes = [
],
];
let paths = vec![
- BlindedPath::new_for_message(&intermediate_nodes[0], pubkey(42), &entropy_source, secp_ctx).unwrap(),
- BlindedPath::new_for_message(&intermediate_nodes[1], pubkey(42), &entropy_source, secp_ctx).unwrap(),
+ BlindedPath::new_for_message(&intermediate_nodes[0], pubkey(42), &entropy_source, secp_ctx)
+ .unwrap(),
+ BlindedPath::new_for_message(&intermediate_nodes[1], pubkey(42), &entropy_source, secp_ctx)
+ .unwrap(),
];
let payinfo = vec![
// licenses.
extern crate bitcoin;
+extern crate hex;
extern crate lightning;
extern crate lightning_rapid_gossip_sync;
-extern crate hex;
pub mod utils;
+pub mod base32;
pub mod bech32_parse;
-pub mod chanmon_deser;
+pub mod bolt11_deser;
pub mod chanmon_consistency;
+pub mod chanmon_deser;
+pub mod fromstr_to_netaddress;
pub mod full_stack;
pub mod indexedmap;
pub mod invoice_deser;
pub mod invoice_request_deser;
pub mod offer_deser;
-pub mod bolt11_deser;
+pub mod onion_hop_data;
pub mod onion_message;
pub mod peer_crypt;
pub mod process_network_graph;
pub mod refund_deser;
pub mod router;
pub mod zbase32;
-pub mod onion_hop_data;
-pub mod base32;
-pub mod fromstr_to_netaddress;
pub mod msg_targets;
echo "pub mod $tn;" >> mod.rs
}
-echo "mod utils;" > mod.rs
+{
+ echo "#![cfg_attr(rustfmt, rustfmt_skip)]"
+ echo "mod utils;"
+} > mod.rs
# Note when adding new targets here you should add a similar line in src/bin/gen_target.sh
+#![cfg_attr(rustfmt, rustfmt_skip)]
mod utils;
pub mod msg_accept_channel;
pub mod msg_announcement_signatures;
// This file is auto-generated by gen_target.sh based on msg_target_template.txt
// To modify it, modify msg_target_template.txt and run gen_target.sh instead.
+#![cfg_attr(rustfmt, rustfmt_skip)]
+
use crate::msg_targets::utils::VecWriter;
use crate::utils::test_logger;
// This file is auto-generated by gen_target.sh based on msg_target_template.txt
// To modify it, modify msg_target_template.txt and run gen_target.sh instead.
+#![cfg_attr(rustfmt, rustfmt_skip)]
+
use crate::msg_targets::utils::VecWriter;
use crate::utils::test_logger;
// This file is auto-generated by gen_target.sh based on msg_target_template.txt
// To modify it, modify msg_target_template.txt and run gen_target.sh instead.
+#![cfg_attr(rustfmt, rustfmt_skip)]
+
use crate::msg_targets::utils::VecWriter;
use crate::utils::test_logger;
// This file is auto-generated by gen_target.sh based on msg_target_template.txt
// To modify it, modify msg_target_template.txt and run gen_target.sh instead.
+#![cfg_attr(rustfmt, rustfmt_skip)]
+
use crate::msg_targets::utils::VecWriter;
use crate::utils::test_logger;
// This file is auto-generated by gen_target.sh based on msg_target_template.txt
// To modify it, modify msg_target_template.txt and run gen_target.sh instead.
+#![cfg_attr(rustfmt, rustfmt_skip)]
+
use crate::msg_targets::utils::VecWriter;
use crate::utils::test_logger;
// This file is auto-generated by gen_target.sh based on msg_target_template.txt
// To modify it, modify msg_target_template.txt and run gen_target.sh instead.
+#![cfg_attr(rustfmt, rustfmt_skip)]
+
use crate::msg_targets::utils::VecWriter;
use crate::utils::test_logger;
// This file is auto-generated by gen_target.sh based on msg_target_template.txt
// To modify it, modify msg_target_template.txt and run gen_target.sh instead.
+#![cfg_attr(rustfmt, rustfmt_skip)]
+
use crate::msg_targets::utils::VecWriter;
use crate::utils::test_logger;
// This file is auto-generated by gen_target.sh based on msg_target_template.txt
// To modify it, modify msg_target_template.txt and run gen_target.sh instead.
+#![cfg_attr(rustfmt, rustfmt_skip)]
+
use crate::msg_targets::utils::VecWriter;
use crate::utils::test_logger;
// This file is auto-generated by gen_target.sh based on msg_target_template.txt
// To modify it, modify msg_target_template.txt and run gen_target.sh instead.
+#![cfg_attr(rustfmt, rustfmt_skip)]
+
use crate::msg_targets::utils::VecWriter;
use crate::utils::test_logger;
// This file is auto-generated by gen_target.sh based on msg_target_template.txt
// To modify it, modify msg_target_template.txt and run gen_target.sh instead.
+#![cfg_attr(rustfmt, rustfmt_skip)]
+
use crate::msg_targets::utils::VecWriter;
use crate::utils::test_logger;
// This file is auto-generated by gen_target.sh based on msg_target_template.txt
// To modify it, modify msg_target_template.txt and run gen_target.sh instead.
+#![cfg_attr(rustfmt, rustfmt_skip)]
+
use crate::msg_targets::utils::VecWriter;
use crate::utils::test_logger;
// This file is auto-generated by gen_target.sh based on msg_target_template.txt
// To modify it, modify msg_target_template.txt and run gen_target.sh instead.
+#![cfg_attr(rustfmt, rustfmt_skip)]
+
use crate::msg_targets::utils::VecWriter;
use crate::utils::test_logger;
// This file is auto-generated by gen_target.sh based on msg_target_template.txt
// To modify it, modify msg_target_template.txt and run gen_target.sh instead.
+#![cfg_attr(rustfmt, rustfmt_skip)]
+
use crate::msg_targets::utils::VecWriter;
use crate::utils::test_logger;
// This file is auto-generated by gen_target.sh based on msg_target_template.txt
// To modify it, modify msg_target_template.txt and run gen_target.sh instead.
+#![cfg_attr(rustfmt, rustfmt_skip)]
+
use crate::msg_targets::utils::VecWriter;
use crate::utils::test_logger;
// This file is auto-generated by gen_target.sh based on msg_target_template.txt
// To modify it, modify msg_target_template.txt and run gen_target.sh instead.
+#![cfg_attr(rustfmt, rustfmt_skip)]
+
use crate::msg_targets::utils::VecWriter;
use crate::utils::test_logger;
// This file is auto-generated by gen_target.sh based on msg_target_template.txt
// To modify it, modify msg_target_template.txt and run gen_target.sh instead.
+#![cfg_attr(rustfmt, rustfmt_skip)]
+
use crate::msg_targets::utils::VecWriter;
use crate::utils::test_logger;
// This file is auto-generated by gen_target.sh based on msg_target_template.txt
// To modify it, modify msg_target_template.txt and run gen_target.sh instead.
+#![cfg_attr(rustfmt, rustfmt_skip)]
+
use crate::msg_targets::utils::VecWriter;
use crate::utils::test_logger;
// This file is auto-generated by gen_target.sh based on msg_target_template.txt
// To modify it, modify msg_target_template.txt and run gen_target.sh instead.
+#![cfg_attr(rustfmt, rustfmt_skip)]
+
use crate::msg_targets::utils::VecWriter;
use crate::utils::test_logger;
// This file is auto-generated by gen_target.sh based on msg_target_template.txt
// To modify it, modify msg_target_template.txt and run gen_target.sh instead.
+#![cfg_attr(rustfmt, rustfmt_skip)]
+
use crate::msg_targets::utils::VecWriter;
use crate::utils::test_logger;
// This file is auto-generated by gen_target.sh based on msg_target_template.txt
// To modify it, modify msg_target_template.txt and run gen_target.sh instead.
+#![cfg_attr(rustfmt, rustfmt_skip)]
+
use crate::msg_targets::utils::VecWriter;
use crate::utils::test_logger;
// This file is auto-generated by gen_target.sh based on msg_target_template.txt
// To modify it, modify msg_target_template.txt and run gen_target.sh instead.
+#![cfg_attr(rustfmt, rustfmt_skip)]
+
use crate::msg_targets::utils::VecWriter;
use crate::utils::test_logger;
// This file is auto-generated by gen_target.sh based on msg_target_template.txt
// To modify it, modify msg_target_template.txt and run gen_target.sh instead.
+#![cfg_attr(rustfmt, rustfmt_skip)]
+
use crate::msg_targets::utils::VecWriter;
use crate::utils::test_logger;
// This file is auto-generated by gen_target.sh based on msg_target_template.txt
// To modify it, modify msg_target_template.txt and run gen_target.sh instead.
+#![cfg_attr(rustfmt, rustfmt_skip)]
+
use crate::msg_targets::utils::VecWriter;
use crate::utils::test_logger;
// This file is auto-generated by gen_target.sh based on msg_target_template.txt
// To modify it, modify msg_target_template.txt and run gen_target.sh instead.
+#![cfg_attr(rustfmt, rustfmt_skip)]
+
use crate::msg_targets::utils::VecWriter;
use crate::utils::test_logger;
// This file is auto-generated by gen_target.sh based on msg_target_template.txt
// To modify it, modify msg_target_template.txt and run gen_target.sh instead.
+#![cfg_attr(rustfmt, rustfmt_skip)]
+
use crate::msg_targets::utils::VecWriter;
use crate::utils::test_logger;
// This file is auto-generated by gen_target.sh based on msg_target_template.txt
// To modify it, modify msg_target_template.txt and run gen_target.sh instead.
+#![cfg_attr(rustfmt, rustfmt_skip)]
+
use crate::msg_targets::utils::VecWriter;
use crate::utils::test_logger;
// This file is auto-generated by gen_target.sh based on msg_target_template.txt
// To modify it, modify msg_target_template.txt and run gen_target.sh instead.
+#![cfg_attr(rustfmt, rustfmt_skip)]
+
use crate::msg_targets::utils::VecWriter;
use crate::utils::test_logger;
// This file is auto-generated by gen_target.sh based on msg_target_template.txt
// To modify it, modify msg_target_template.txt and run gen_target.sh instead.
+#![cfg_attr(rustfmt, rustfmt_skip)]
+
use crate::msg_targets::utils::VecWriter;
use crate::utils::test_logger;
// This file is auto-generated by gen_target.sh based on msg_target_template.txt
// To modify it, modify msg_target_template.txt and run gen_target.sh instead.
+#![cfg_attr(rustfmt, rustfmt_skip)]
+
use crate::msg_targets::utils::VecWriter;
use crate::utils::test_logger;
// This file is auto-generated by gen_target.sh based on msg_target_template.txt
// To modify it, modify msg_target_template.txt and run gen_target.sh instead.
+#![cfg_attr(rustfmt, rustfmt_skip)]
+
use crate::msg_targets::utils::VecWriter;
use crate::utils::test_logger;
// This file is auto-generated by gen_target.sh based on msg_target_template.txt
// To modify it, modify msg_target_template.txt and run gen_target.sh instead.
+#![cfg_attr(rustfmt, rustfmt_skip)]
+
use crate::msg_targets::utils::VecWriter;
use crate::utils::test_logger;
// This file is auto-generated by gen_target.sh based on msg_target_template.txt
// To modify it, modify msg_target_template.txt and run gen_target.sh instead.
+#![cfg_attr(rustfmt, rustfmt_skip)]
+
use crate::msg_targets::utils::VecWriter;
use crate::utils::test_logger;
// This file is auto-generated by gen_target.sh based on msg_target_template.txt
// To modify it, modify msg_target_template.txt and run gen_target.sh instead.
+#![cfg_attr(rustfmt, rustfmt_skip)]
+
use crate::msg_targets::utils::VecWriter;
use crate::utils::test_logger;
// This file is auto-generated by gen_target.sh based on msg_target_template.txt
// To modify it, modify msg_target_template.txt and run gen_target.sh instead.
+#![cfg_attr(rustfmt, rustfmt_skip)]
+
use crate::msg_targets::utils::VecWriter;
use crate::utils::test_logger;
// This file is auto-generated by gen_target.sh based on msg_target_template.txt
// To modify it, modify msg_target_template.txt and run gen_target.sh instead.
+#![cfg_attr(rustfmt, rustfmt_skip)]
+
use crate::msg_targets::utils::VecWriter;
use crate::utils::test_logger;
// This file is auto-generated by gen_target.sh based on msg_target_template.txt
// To modify it, modify msg_target_template.txt and run gen_target.sh instead.
+#![cfg_attr(rustfmt, rustfmt_skip)]
+
use crate::msg_targets::utils::VecWriter;
use crate::utils::test_logger;
// This file is auto-generated by gen_target.sh based on msg_target_template.txt
// To modify it, modify msg_target_template.txt and run gen_target.sh instead.
+#![cfg_attr(rustfmt, rustfmt_skip)]
+
use crate::msg_targets::utils::VecWriter;
use crate::utils::test_logger;
// This file is auto-generated by gen_target.sh based on msg_target_template.txt
// To modify it, modify msg_target_template.txt and run gen_target.sh instead.
+#![cfg_attr(rustfmt, rustfmt_skip)]
+
use crate::msg_targets::utils::VecWriter;
use crate::utils::test_logger;
// This file is auto-generated by gen_target.sh based on msg_target_template.txt
// To modify it, modify msg_target_template.txt and run gen_target.sh instead.
+#![cfg_attr(rustfmt, rustfmt_skip)]
+
use crate::msg_targets::utils::VecWriter;
use crate::utils::test_logger;
// This file is auto-generated by gen_target.sh based on msg_target_template.txt
// To modify it, modify msg_target_template.txt and run gen_target.sh instead.
+#![cfg_attr(rustfmt, rustfmt_skip)]
+
use crate::msg_targets::utils::VecWriter;
use crate::utils::test_logger;
// This file is auto-generated by gen_target.sh based on msg_target_template.txt
// To modify it, modify msg_target_template.txt and run gen_target.sh instead.
+#![cfg_attr(rustfmt, rustfmt_skip)]
+
use crate::msg_targets::utils::VecWriter;
use crate::utils::test_logger;
// This file is auto-generated by gen_target.sh based on msg_target_template.txt
// To modify it, modify msg_target_template.txt and run gen_target.sh instead.
+#![cfg_attr(rustfmt, rustfmt_skip)]
+
use crate::msg_targets::utils::VecWriter;
use crate::utils::test_logger;
// This file is auto-generated by gen_target.sh based on msg_target_template.txt
// To modify it, modify msg_target_template.txt and run gen_target.sh instead.
+#![cfg_attr(rustfmt, rustfmt_skip)]
+
use crate::msg_targets::utils::VecWriter;
use crate::utils::test_logger;
// This file is auto-generated by gen_target.sh based on msg_target_template.txt
// To modify it, modify msg_target_template.txt and run gen_target.sh instead.
+#![cfg_attr(rustfmt, rustfmt_skip)]
+
use crate::msg_targets::utils::VecWriter;
use crate::utils::test_logger;
// This file is auto-generated by gen_target.sh based on msg_target_template.txt
// To modify it, modify msg_target_template.txt and run gen_target.sh instead.
+#![cfg_attr(rustfmt, rustfmt_skip)]
+
use crate::msg_targets::utils::VecWriter;
use crate::utils::test_logger;
// This file is auto-generated by gen_target.sh based on msg_target_template.txt
// To modify it, modify msg_target_template.txt and run gen_target.sh instead.
+#![cfg_attr(rustfmt, rustfmt_skip)]
+
use crate::msg_targets::utils::VecWriter;
use crate::utils::test_logger;
// This file is auto-generated by gen_target.sh based on msg_target_template.txt
// To modify it, modify msg_target_template.txt and run gen_target.sh instead.
+#![cfg_attr(rustfmt, rustfmt_skip)]
+
use crate::msg_targets::utils::VecWriter;
use crate::utils::test_logger;
// entirely
#[macro_export]
macro_rules! test_msg {
- ($MsgType: path, $data: ident) => {
- {
- use lightning::util::ser::{Writeable, Readable};
- let mut r = ::std::io::Cursor::new($data);
- if let Ok(msg) = <$MsgType as Readable>::read(&mut r) {
- let p = r.position() as usize;
- let mut w = VecWriter(Vec::new());
- msg.write(&mut w).unwrap();
+ ($MsgType: path, $data: ident) => {{
+ use lightning::util::ser::{Readable, Writeable};
+ let mut r = ::std::io::Cursor::new($data);
+ if let Ok(msg) = <$MsgType as Readable>::read(&mut r) {
+ let p = r.position() as usize;
+ let mut w = VecWriter(Vec::new());
+ msg.write(&mut w).unwrap();
- assert_eq!(w.0.len(), p);
- assert_eq!(msg.serialized_length(), p);
- assert_eq!(&r.into_inner()[..p], &w.0[..p]);
- }
+ assert_eq!(w.0.len(), p);
+ assert_eq!(msg.serialized_length(), p);
+ assert_eq!(&r.into_inner()[..p], &w.0[..p]);
}
- }
+ }};
}
// Tests a message that may lose data on roundtrip, but shoulnd't lose data compared to our
// re-serialization.
#[macro_export]
macro_rules! test_msg_simple {
- ($MsgType: path, $data: ident) => {
- {
- use lightning::util::ser::{Writeable, Readable};
- let mut r = ::std::io::Cursor::new($data);
- if let Ok(msg) = <$MsgType as Readable>::read(&mut r) {
- let mut w = VecWriter(Vec::new());
- msg.write(&mut w).unwrap();
- assert_eq!(msg.serialized_length(), w.0.len());
+ ($MsgType: path, $data: ident) => {{
+ use lightning::util::ser::{Readable, Writeable};
+ let mut r = ::std::io::Cursor::new($data);
+ if let Ok(msg) = <$MsgType as Readable>::read(&mut r) {
+ let mut w = VecWriter(Vec::new());
+ msg.write(&mut w).unwrap();
+ assert_eq!(msg.serialized_length(), w.0.len());
- let msg = <$MsgType as Readable>::read(&mut ::std::io::Cursor::new(&w.0)).unwrap();
- let mut w_two = VecWriter(Vec::new());
- msg.write(&mut w_two).unwrap();
- assert_eq!(&w.0[..], &w_two.0[..]);
- }
+ let msg = <$MsgType as Readable>::read(&mut ::std::io::Cursor::new(&w.0)).unwrap();
+ let mut w_two = VecWriter(Vec::new());
+ msg.write(&mut w_two).unwrap();
+ assert_eq!(&w.0[..], &w_two.0[..]);
}
- }
+ }};
}
// Tests a message that must survive roundtrip exactly, and must exactly empty the read buffer and
// split it back out on re-serialization.
#[macro_export]
macro_rules! test_msg_exact {
- ($MsgType: path, $data: ident) => {
- {
- use lightning::util::ser::{Writeable, Readable};
- let mut r = ::std::io::Cursor::new($data);
- if let Ok(msg) = <$MsgType as Readable>::read(&mut r) {
- let mut w = VecWriter(Vec::new());
- msg.write(&mut w).unwrap();
- assert_eq!(&r.into_inner()[..], &w.0[..]);
- assert_eq!(msg.serialized_length(), w.0.len());
- }
+ ($MsgType: path, $data: ident) => {{
+ use lightning::util::ser::{Readable, Writeable};
+ let mut r = ::std::io::Cursor::new($data);
+ if let Ok(msg) = <$MsgType as Readable>::read(&mut r) {
+ let mut w = VecWriter(Vec::new());
+ msg.write(&mut w).unwrap();
+ assert_eq!(&r.into_inner()[..], &w.0[..]);
+ assert_eq!(msg.serialized_length(), w.0.len());
}
- }
+ }};
}
// Tests a message that must survive roundtrip exactly, modulo one "hole" which may be set to
// any value on re-serialization.
#[macro_export]
macro_rules! test_msg_hole {
- ($MsgType: path, $data: ident, $hole: expr, $hole_len: expr) => {
- {
- use lightning::util::ser::{Writeable, Readable};
- let mut r = ::std::io::Cursor::new($data);
- if let Ok(msg) = <$MsgType as Readable>::read(&mut r) {
- let mut w = VecWriter(Vec::new());
- msg.write(&mut w).unwrap();
- let p = w.0.len() as usize;
- assert_eq!(msg.serialized_length(), p);
+ ($MsgType: path, $data: ident, $hole: expr, $hole_len: expr) => {{
+ use lightning::util::ser::{Readable, Writeable};
+ let mut r = ::std::io::Cursor::new($data);
+ if let Ok(msg) = <$MsgType as Readable>::read(&mut r) {
+ let mut w = VecWriter(Vec::new());
+ msg.write(&mut w).unwrap();
+ let p = w.0.len() as usize;
+ assert_eq!(msg.serialized_length(), p);
- assert_eq!(w.0.len(), p);
- assert_eq!(&r.get_ref()[..$hole], &w.0[..$hole]);
- assert_eq!(&r.get_ref()[$hole+$hole_len..p], &w.0[$hole+$hole_len..]);
- }
+ assert_eq!(w.0.len(), p);
+ assert_eq!(&r.get_ref()[..$hole], &w.0[..$hole]);
+ assert_eq!(&r.get_ref()[$hole + $hole_len..p], &w.0[$hole + $hole_len..]);
}
- }
+ }};
}
// You may not use this file except in accordance with one or both of these
// licenses.
-use bitcoin::secp256k1::{Keypair, PublicKey, Secp256k1, SecretKey};
use crate::utils::test_logger;
+use bitcoin::secp256k1::{Keypair, PublicKey, Secp256k1, SecretKey};
use core::convert::TryFrom;
use lightning::offers::invoice_request::UnsignedInvoiceRequest;
use lightning::offers::offer::{Amount, Offer, Quantity};
if let Ok(invoice_request) = build_response(&offer, pubkey) {
invoice_request
- .sign(|message: &UnsignedInvoiceRequest|
+ .sign(|message: &UnsignedInvoiceRequest| {
Ok(secp_ctx.sign_schnorr_no_aux_rand(message.as_ref().as_digest(), &keys))
- )
+ })
.unwrap()
.write(&mut buffer)
.unwrap();
}
fn build_response(
- offer: &Offer, pubkey: PublicKey
+ offer: &Offer, pubkey: PublicKey,
) -> Result<UnsignedInvoiceRequest, Bolt12SemanticError> {
let mut builder = offer.request_invoice(vec![42; 64], pubkey)?;
#[inline]
pub fn onion_hop_data_test<Out: test_logger::Output>(data: &[u8], _out: Out) {
- use lightning::util::ser::ReadableArgs;
use bitcoin::secp256k1::PublicKey;
+ use lightning::util::ser::ReadableArgs;
let mut r = ::std::io::Cursor::new(data);
let node_signer = test_utils::TestNodeSigner::new(test_utils::privkey(42));
- let _ = <lightning::ln::msgs::InboundOnionPayload as ReadableArgs<(Option<PublicKey>, &&test_utils::TestNodeSigner)>>::read(&mut r, (None, &&node_signer));
+ let _ = <lightning::ln::msgs::InboundOnionPayload as ReadableArgs<(
+ Option<PublicKey>,
+ &&test_utils::TestNodeSigner,
+ )>>::read(&mut r, (None, &&node_signer));
}
#[no_mangle]
pub extern "C" fn onion_hop_data_run(data: *const u8, datalen: usize) {
- use lightning::util::ser::ReadableArgs;
use bitcoin::secp256k1::PublicKey;
+ use lightning::util::ser::ReadableArgs;
let data = unsafe { std::slice::from_raw_parts(data, datalen) };
let mut r = ::std::io::Cursor::new(data);
let node_signer = test_utils::TestNodeSigner::new(test_utils::privkey(42));
- let _ = <lightning::ln::msgs::InboundOnionPayload as ReadableArgs<(Option<PublicKey>, &&test_utils::TestNodeSigner)>>::read(&mut r, (None, &&node_signer));
+ let _ = <lightning::ln::msgs::InboundOnionPayload as ReadableArgs<(
+ Option<PublicKey>,
+ &&test_utils::TestNodeSigner,
+ )>>::read(&mut r, (None, &&node_signer));
}
// Imports that need to be added manually
use bech32::u5;
use bitcoin::blockdata::script::ScriptBuf;
-use bitcoin::secp256k1::{PublicKey, Scalar, Secp256k1, SecretKey, self};
use bitcoin::secp256k1::ecdh::SharedSecret;
use bitcoin::secp256k1::ecdsa::RecoverableSignature;
use bitcoin::secp256k1::schnorr;
+use bitcoin::secp256k1::{self, PublicKey, Scalar, Secp256k1, SecretKey};
use lightning::blinded_path::{BlindedPath, EmptyNodeIdLookUp};
-use lightning::blinded_path::message::ForwardNode;
use lightning::ln::features::InitFeatures;
use lightning::ln::msgs::{self, DecodeError, OnionMessageHandler};
use lightning::ln::script::ShutdownScript;
use lightning::offers::invoice::UnsignedBolt12Invoice;
use lightning::offers::invoice_request::UnsignedInvoiceRequest;
-use lightning::sign::{Recipient, KeyMaterial, EntropySource, NodeSigner, SignerProvider};
-use lightning::util::test_channel_signer::TestChannelSigner;
-use lightning::util::logger::Logger;
-use lightning::util::ser::{Readable, Writeable, Writer};
-use lightning::onion_message::messenger::{CustomOnionMessageHandler, Destination, MessageRouter, OnionMessagePath, OnionMessenger, PendingOnionMessage, Responder, ResponseInstruction};
+use lightning::onion_message::messenger::{
+ CustomOnionMessageHandler, Destination, MessageRouter, OnionMessagePath, OnionMessenger,
+ PendingOnionMessage, Responder, ResponseInstruction,
+};
use lightning::onion_message::offers::{OffersMessage, OffersMessageHandler};
use lightning::onion_message::packet::OnionMessageContents;
+use lightning::sign::{EntropySource, KeyMaterial, NodeSigner, Recipient, SignerProvider};
+use lightning::util::logger::Logger;
+use lightning::util::ser::{Readable, Writeable, Writer};
+use lightning::util::test_channel_signer::TestChannelSigner;
use crate::utils::test_logger;
let mut secret_bytes = [1; 32];
secret_bytes[31] = 2;
let secret = SecretKey::from_slice(&secret_bytes).unwrap();
- let keys_manager = KeyProvider {
- node_secret: secret,
- counter: AtomicU64::new(0),
- };
+ let keys_manager = KeyProvider { node_secret: secret, counter: AtomicU64::new(0) };
let node_id_lookup = EmptyNodeIdLookUp {};
let message_router = TestMessageRouter {};
let offers_msg_handler = TestOffersMessageHandler {};
let custom_msg_handler = TestCustomMessageHandler {};
let onion_messenger = OnionMessenger::new(
- &keys_manager, &keys_manager, logger, &node_id_lookup, &message_router,
- &offers_msg_handler, &custom_msg_handler
+ &keys_manager,
+ &keys_manager,
+ logger,
+ &node_id_lookup,
+ &message_router,
+ &offers_msg_handler,
+ &custom_msg_handler,
);
let peer_node_id = {
impl MessageRouter for TestMessageRouter {
fn find_path(
- &self, _sender: PublicKey, _peers: Vec<PublicKey>, destination: Destination
+ &self, _sender: PublicKey, _peers: Vec<PublicKey>, destination: Destination,
) -> Result<OnionMessagePath, ()> {
- Ok(OnionMessagePath {
- intermediate_nodes: vec![],
- destination,
- first_node_addresses: None,
- })
+ Ok(OnionMessagePath { intermediate_nodes: vec![], destination, first_node_addresses: None })
}
fn create_blinded_paths<T: secp256k1::Signing + secp256k1::Verification>(
- &self, _recipient: PublicKey, _peers: Vec<ForwardNode>, _secp_ctx: &Secp256k1<T>,
+ &self, _recipient: PublicKey, _peers: Vec<PublicKey>, _secp_ctx: &Secp256k1<T>,
) -> Result<Vec<BlindedPath>, ()> {
unreachable!()
}
struct TestOffersMessageHandler {}
impl OffersMessageHandler for TestOffersMessageHandler {
- fn handle_message(&self, _message: OffersMessage, _responder: Option<Responder>) -> ResponseInstruction<OffersMessage> {
+ fn handle_message(
+ &self, _message: OffersMessage, _responder: Option<Responder>,
+ ) -> ResponseInstruction<OffersMessage> {
ResponseInstruction::NoResponse
}
}
impl CustomOnionMessageHandler for TestCustomMessageHandler {
type CustomMessage = TestCustomMessage;
- fn handle_custom_message(&self, message: Self::CustomMessage, responder: Option<Responder>) -> ResponseInstruction<Self::CustomMessage> {
+ fn handle_custom_message(
+ &self, message: Self::CustomMessage, responder: Option<Responder>,
+ ) -> ResponseInstruction<Self::CustomMessage> {
match responder {
Some(responder) => responder.respond(message),
- None => ResponseInstruction::NoResponse
+ None => ResponseInstruction::NoResponse,
}
}
- fn read_custom_message<R: io::Read>(&self, _message_type: u64, buffer: &mut R) -> Result<Option<Self::CustomMessage>, msgs::DecodeError> {
+ fn read_custom_message<R: io::Read>(
+ &self, _message_type: u64, buffer: &mut R,
+ ) -> Result<Option<Self::CustomMessage>, msgs::DecodeError> {
let mut buf = Vec::new();
buffer.read_to_end(&mut buf)?;
- return Ok(Some(TestCustomMessage {}))
+ return Ok(Some(TestCustomMessage {}));
}
fn release_pending_custom_messages(&self) -> Vec<PendingOnionMessage<Self::CustomMessage>> {
vec![]
impl EntropySource for KeyProvider {
fn get_secure_random_bytes(&self) -> [u8; 32] {
let ctr = self.counter.fetch_add(1, Ordering::Relaxed);
- [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- (ctr >> 8*7) as u8, (ctr >> 8*6) as u8, (ctr >> 8*5) as u8, (ctr >> 8*4) as u8, (ctr >> 8*3) as u8, (ctr >> 8*2) as u8, (ctr >> 8*1) as u8, 14, (ctr >> 8*0) as u8]
+ #[rustfmt::skip]
+ let random_bytes = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ (ctr >> 8*7) as u8, (ctr >> 8*6) as u8, (ctr >> 8*5) as u8, (ctr >> 8*4) as u8,
+ (ctr >> 8*3) as u8, (ctr >> 8*2) as u8, (ctr >> 8*1) as u8, 14, (ctr >> 8*0) as u8];
+ random_bytes
}
}
fn get_node_id(&self, recipient: Recipient) -> Result<PublicKey, ()> {
let node_secret = match recipient {
Recipient::Node => Ok(&self.node_secret),
- Recipient::PhantomNode => Err(())
+ Recipient::PhantomNode => Err(()),
}?;
Ok(PublicKey::from_secret_key(&Secp256k1::signing_only(), node_secret))
}
- fn ecdh(&self, recipient: Recipient, other_key: &PublicKey, tweak: Option<&Scalar>) -> Result<SharedSecret, ()> {
+ fn ecdh(
+ &self, recipient: Recipient, other_key: &PublicKey, tweak: Option<&Scalar>,
+ ) -> Result<SharedSecret, ()> {
let mut node_secret = match recipient {
Recipient::Node => Ok(self.node_secret.clone()),
- Recipient::PhantomNode => Err(())
+ Recipient::PhantomNode => Err(()),
}?;
if let Some(tweak) = tweak {
node_secret = node_secret.mul_tweak(tweak).map_err(|_| ())?;
Ok(SharedSecret::new(other_key, &node_secret))
}
- fn get_inbound_payment_key_material(&self) -> KeyMaterial { unreachable!() }
+ fn get_inbound_payment_key_material(&self) -> KeyMaterial {
+ unreachable!()
+ }
- fn sign_invoice(&self, _hrp_bytes: &[u8], _invoice_data: &[u5], _recipient: Recipient) -> Result<RecoverableSignature, ()> {
+ fn sign_invoice(
+ &self, _hrp_bytes: &[u8], _invoice_data: &[u5], _recipient: Recipient,
+ ) -> Result<RecoverableSignature, ()> {
unreachable!()
}
fn sign_bolt12_invoice_request(
- &self, _invoice_request: &UnsignedInvoiceRequest
+ &self, _invoice_request: &UnsignedInvoiceRequest,
) -> Result<schnorr::Signature, ()> {
unreachable!()
}
unreachable!()
}
- fn sign_gossip_message(&self, _msg: lightning::ln::msgs::UnsignedGossipMessage) -> Result<bitcoin::secp256k1::ecdsa::Signature, ()> {
+ fn sign_gossip_message(
+ &self, _msg: lightning::ln::msgs::UnsignedGossipMessage,
+ ) -> Result<bitcoin::secp256k1::ecdsa::Signature, ()> {
unreachable!()
}
}
#[cfg(taproot)]
type TaprootSigner = TestChannelSigner;
- fn generate_channel_keys_id(&self, _inbound: bool, _channel_value_satoshis: u64, _user_channel_id: u128) -> [u8; 32] { unreachable!() }
+ fn generate_channel_keys_id(
+ &self, _inbound: bool, _channel_value_satoshis: u64, _user_channel_id: u128,
+ ) -> [u8; 32] {
+ unreachable!()
+ }
- fn derive_channel_signer(&self, _channel_value_satoshis: u64, _channel_keys_id: [u8; 32]) -> Self::EcdsaSigner {
+ fn derive_channel_signer(
+ &self, _channel_value_satoshis: u64, _channel_keys_id: [u8; 32],
+ ) -> Self::EcdsaSigner {
unreachable!()
}
- fn read_chan_signer(&self, _data: &[u8]) -> Result<TestChannelSigner, DecodeError> { unreachable!() }
+ fn read_chan_signer(&self, _data: &[u8]) -> Result<TestChannelSigner, DecodeError> {
+ unreachable!()
+ }
- fn get_destination_script(&self, _channel_keys_id: [u8; 32]) -> Result<ScriptBuf, ()> { unreachable!() }
+ fn get_destination_script(&self, _channel_keys_id: [u8; 32]) -> Result<ScriptBuf, ()> {
+ unreachable!()
+ }
- fn get_shutdown_scriptpubkey(&self) -> Result<ShutdownScript, ()> { unreachable!() }
+ fn get_shutdown_scriptpubkey(&self) -> Result<ShutdownScript, ()> {
+ unreachable!()
+ }
}
#[cfg(test)]
}
impl Logger for TrackingLogger {
fn log(&self, record: Record) {
- *self.lines.lock().unwrap().entry((record.module_path.to_string(), format!("{}", record.args))).or_insert(0) += 1;
- println!("{:<5} [{} : {}, {}] {}", record.level.to_string(), record.module_path, record.file, record.line, record.args);
+ let mut lines_lock = self.lines.lock().unwrap();
+ let key = (record.module_path.to_string(), format!("{}", record.args));
+ *lines_lock.entry(key).or_insert(0) += 1;
+ println!(
+ "{:<5} [{} : {}, {}] {}",
+ record.level.to_string(),
+ record.module_path,
+ record.file,
+ record.line,
+ record.args
+ );
}
}
// You may not use this file except in accordance with one or both of these
// licenses.
-use lightning::ln::peer_channel_encryptor::{PeerChannelEncryptor, MessageBuf};
+use lightning::ln::peer_channel_encryptor::{MessageBuf, PeerChannelEncryptor};
use lightning::util::test_utils::TestNodeSigner;
-use bitcoin::secp256k1::{Secp256k1, PublicKey, SecretKey};
+use bitcoin::secp256k1::{PublicKey, Secp256k1, SecretKey};
use crate::utils::test_logger;
#[inline]
fn slice_to_be16(v: &[u8]) -> u16 {
- ((v[0] as u16) << 8*1) |
- ((v[1] as u16) << 8*0)
+ ((v[0] as u16) << 8 * 1) | ((v[1] as u16) << 8 * 0)
}
#[inline]
pub fn do_test(data: &[u8]) {
let mut read_pos = 0;
macro_rules! get_slice {
- ($len: expr) => {
- {
- let slice_len = $len as usize;
- if data.len() < read_pos + slice_len {
- return;
- }
- read_pos += slice_len;
- &data[read_pos - slice_len..read_pos]
+ ($len: expr) => {{
+ let slice_len = $len as usize;
+ if data.len() < read_pos + slice_len {
+ return;
}
- }
+ read_pos += slice_len;
+ &data[read_pos - slice_len..read_pos]
+ }};
}
let secp_ctx = Secp256k1::signing_only();
crypter
} else {
let mut crypter = PeerChannelEncryptor::new_inbound(&&node_signer);
- match crypter.process_act_one_with_keys(get_slice!(50), &&node_signer, ephemeral_key, &secp_ctx) {
+ match crypter.process_act_one_with_keys(
+ get_slice!(50),
+ &&node_signer,
+ ephemeral_key,
+ &secp_ctx,
+ ) {
Ok(_) => {},
Err(_) => return,
}
let mut buf = [0; 65536 + 16];
loop {
if get_slice!(1)[0] == 0 {
- crypter.encrypt_buffer(MessageBuf::from_encoded(&get_slice!(slice_to_be16(get_slice!(2)))));
+ crypter.encrypt_buffer(MessageBuf::from_encoded(&get_slice!(slice_to_be16(
+ get_slice!(2)
+ ))));
} else {
- let len = match crypter.decrypt_length_header(get_slice!(16+2)) {
+ let len = match crypter.decrypt_length_header(get_slice!(16 + 2)) {
Ok(len) => len,
Err(_) => return,
};
/// Actual fuzz test, method signature and name are fixed
fn do_test<Out: test_logger::Output>(data: &[u8], out: Out) {
let logger = test_logger::TestLogger::new("".to_owned(), out);
- let network_graph = lightning::routing::gossip::NetworkGraph::new(bitcoin::Network::Bitcoin, &logger);
+ let network_graph =
+ lightning::routing::gossip::NetworkGraph::new(bitcoin::Network::Bitcoin, &logger);
let rapid_sync = RapidGossipSync::new(&network_graph, &logger);
let _ = rapid_sync.update_network_graph(data);
}
// You may not use this file except in accordance with one or both of these
// licenses.
-use bitcoin::secp256k1::{Keypair, PublicKey, Secp256k1, SecretKey, self};
use crate::utils::test_logger;
+use bitcoin::secp256k1::{self, Keypair, PublicKey, Secp256k1, SecretKey};
use core::convert::TryFrom;
-use lightning::blinded_path::BlindedPath;
use lightning::blinded_path::message::ForwardNode;
-use lightning::sign::EntropySource;
-use lightning::ln::PaymentHash;
+use lightning::blinded_path::BlindedPath;
use lightning::ln::features::BlindedHopFeatures;
+use lightning::ln::PaymentHash;
use lightning::offers::invoice::{BlindedPayInfo, UnsignedBolt12Invoice};
use lightning::offers::parse::Bolt12SemanticError;
use lightning::offers::refund::Refund;
+use lightning::sign::EntropySource;
use lightning::util::ser::Writeable;
#[inline]
if let Ok(invoice) = build_response(&refund, pubkey, &secp_ctx) {
invoice
- .sign(|message: &UnsignedBolt12Invoice|
+ .sign(|message: &UnsignedBolt12Invoice| {
Ok(secp_ctx.sign_schnorr_no_aux_rand(message.as_ref().as_digest(), &keys))
- )
+ })
.unwrap()
.write(&mut buffer)
.unwrap();
struct Randomness;
impl EntropySource for Randomness {
- fn get_secure_random_bytes(&self) -> [u8; 32] { [42; 32] }
+ fn get_secure_random_bytes(&self) -> [u8; 32] {
+ [42; 32]
+ }
}
fn pubkey(byte: u8) -> PublicKey {
}
fn build_response<T: secp256k1::Signing + secp256k1::Verification>(
- refund: &Refund, signing_pubkey: PublicKey, secp_ctx: &Secp256k1<T>
+ refund: &Refund, signing_pubkey: PublicKey, secp_ctx: &Secp256k1<T>,
) -> Result<UnsignedBolt12Invoice, Bolt12SemanticError> {
let entropy_source = Randomness {};
let intermediate_nodes = [
],
];
let paths = vec![
- BlindedPath::new_for_message(&intermediate_nodes[0], pubkey(42), &entropy_source, secp_ctx).unwrap(),
- BlindedPath::new_for_message(&intermediate_nodes[1], pubkey(42), &entropy_source, secp_ctx).unwrap(),
+ BlindedPath::new_for_message(&intermediate_nodes[0], pubkey(42), &entropy_source, secp_ctx)
+ .unwrap(),
+ BlindedPath::new_for_message(&intermediate_nodes[1], pubkey(42), &entropy_source, secp_ctx)
+ .unwrap(),
];
let payinfo = vec![
use lightning::blinded_path::{BlindedHop, BlindedPath, IntroductionNode};
use lightning::chain::transaction::OutPoint;
-use lightning::ln::ChannelId;
-use lightning::ln::channel_state::{ChannelDetails, ChannelCounterparty, ChannelShutdownState};
+use lightning::ln::channel_state::{ChannelCounterparty, ChannelDetails, ChannelShutdownState};
use lightning::ln::channelmanager;
use lightning::ln::features::{BlindedHopFeatures, Bolt12InvoiceFeatures};
use lightning::ln::msgs;
+use lightning::ln::ChannelId;
use lightning::offers::invoice::BlindedPayInfo;
use lightning::routing::gossip::{NetworkGraph, RoutingFees};
+use lightning::routing::router::{
+ find_route, PaymentParameters, RouteHint, RouteHintHop, RouteParameters,
+};
+use lightning::routing::scoring::{
+ ProbabilisticScorer, ProbabilisticScoringDecayParameters, ProbabilisticScoringFeeParameters,
+};
use lightning::routing::utxo::{UtxoFuture, UtxoLookup, UtxoLookupError, UtxoResult};
-use lightning::routing::router::{find_route, PaymentParameters, RouteHint, RouteHintHop, RouteParameters};
-use lightning::routing::scoring::{ProbabilisticScorer, ProbabilisticScoringFeeParameters, ProbabilisticScoringDecayParameters};
use lightning::util::config::UserConfig;
use lightning::util::hash_tables::*;
use lightning::util::ser::Readable;
use bitcoin::hashes::Hash;
-use bitcoin::secp256k1::PublicKey;
use bitcoin::network::Network;
+use bitcoin::secp256k1::PublicKey;
use crate::utils::test_logger;
use std::convert::TryInto;
-use std::sync::Arc;
use std::sync::atomic::{AtomicUsize, Ordering};
+use std::sync::Arc;
#[inline]
pub fn slice_to_be16(v: &[u8]) -> u16 {
- ((v[0] as u16) << 8*1) |
- ((v[1] as u16) << 8*0)
+ ((v[0] as u16) << 8 * 1) | ((v[1] as u16) << 8 * 0)
}
#[inline]
pub fn slice_to_be32(v: &[u8]) -> u32 {
- ((v[0] as u32) << 8*3) |
- ((v[1] as u32) << 8*2) |
- ((v[2] as u32) << 8*1) |
- ((v[3] as u32) << 8*0)
+ ((v[0] as u32) << 8 * 3)
+ | ((v[1] as u32) << 8 * 2)
+ | ((v[2] as u32) << 8 * 1)
+ | ((v[3] as u32) << 8 * 0)
}
#[inline]
pub fn slice_to_be64(v: &[u8]) -> u64 {
- ((v[0] as u64) << 8*7) |
- ((v[1] as u64) << 8*6) |
- ((v[2] as u64) << 8*5) |
- ((v[3] as u64) << 8*4) |
- ((v[4] as u64) << 8*3) |
- ((v[5] as u64) << 8*2) |
- ((v[6] as u64) << 8*1) |
- ((v[7] as u64) << 8*0)
+ ((v[0] as u64) << 8 * 7)
+ | ((v[1] as u64) << 8 * 6)
+ | ((v[2] as u64) << 8 * 5)
+ | ((v[3] as u64) << 8 * 4)
+ | ((v[4] as u64) << 8 * 3)
+ | ((v[5] as u64) << 8 * 2)
+ | ((v[6] as u64) << 8 * 1)
+ | ((v[7] as u64) << 8 * 0)
}
-
struct InputData {
data: Vec<u8>,
read_pos: AtomicUsize,
impl<Out: test_logger::Output> UtxoLookup for FuzzChainSource<'_, '_, Out> {
fn get_utxo(&self, _chain_hash: &ChainHash, _short_channel_id: u64) -> UtxoResult {
let input_slice = self.input.get_slice(2);
- if input_slice.is_none() { return UtxoResult::Sync(Err(UtxoLookupError::UnknownTx)); }
+ if input_slice.is_none() {
+ return UtxoResult::Sync(Err(UtxoLookupError::UnknownTx));
+ }
let input_slice = input_slice.unwrap();
let txo_res = TxOut {
value: Amount::from_sat(if input_slice[0] % 2 == 0 { 1_000_000 } else { 1_000 }),
#[inline]
pub fn do_test<Out: test_logger::Output>(data: &[u8], out: Out) {
- let input = Arc::new(InputData {
- data: data.to_vec(),
- read_pos: AtomicUsize::new(0),
- });
+ let input = Arc::new(InputData { data: data.to_vec(), read_pos: AtomicUsize::new(0) });
macro_rules! get_slice_nonadvancing {
($len: expr) => {
match input.get_slice_nonadvancing($len as usize) {
Some(slice) => slice,
None => return,
}
- }
+ };
}
macro_rules! get_slice {
($len: expr) => {
Some(slice) => slice,
None => return,
}
- }
+ };
}
macro_rules! decode_msg {
msgs::DecodeError::Io(e) => panic!("{:?}", e),
msgs::DecodeError::UnsupportedCompression => return,
msgs::DecodeError::DangerousValue => return,
- }
+ },
}
- }}
+ }};
}
macro_rules! decode_msg_with_len16 {
- ($MsgType: path, $excess: expr) => {
- {
- let extra_len = slice_to_be16(get_slice_nonadvancing!(2));
- decode_msg!($MsgType, 2 + (extra_len as usize) + $excess)
- }
- }
+ ($MsgType: path, $excess: expr) => {{
+ let extra_len = slice_to_be16(get_slice_nonadvancing!(2));
+ decode_msg!($MsgType, 2 + (extra_len as usize) + $excess)
+ }};
}
macro_rules! get_pubkey_from_node_id {
Ok(pk) => pk,
Err(_) => return,
}
- }
+ };
}
macro_rules! get_pubkey {
Ok(key) => key,
Err(_) => return,
}
- }
+ };
}
let logger = test_logger::TestLogger::new("".to_owned(), out);
let our_pubkey = get_pubkey!();
let net_graph = NetworkGraph::new(Network::Bitcoin, &logger);
- let chain_source = FuzzChainSource {
- input: Arc::clone(&input),
- net_graph: &net_graph,
- };
+ let chain_source = FuzzChainSource { input: Arc::clone(&input), net_graph: &net_graph };
let mut node_pks = new_hash_map();
let mut scid = 42;
count => {
for _ in 0..count {
scid += 1;
- let (rnid, _) =
- node_pks.iter().skip(u16::from_be_bytes(get_slice!(2).try_into().unwrap()) as usize % node_pks.len()).next().unwrap();
+ let skip = u16::from_be_bytes(get_slice!(2).try_into().unwrap()) as usize
+ % node_pks.len();
+ let (rnid, _) = node_pks.iter().skip(skip).next().unwrap();
let capacity = u64::from_be_bytes(get_slice!(8).try_into().unwrap());
$first_hops_vec.push(ChannelDetails {
channel_id: ChannelId::new_zero(),
counterparty: ChannelCounterparty {
node_id: *rnid,
- features: channelmanager::provided_init_features(&UserConfig::default()),
+ features: channelmanager::provided_init_features(
+ &UserConfig::default(),
+ ),
unspendable_punishment_reserve: 0,
forwarding_info: None,
outbound_htlc_minimum_msat: None,
outbound_htlc_maximum_msat: None,
},
- funding_txo: Some(OutPoint { txid: bitcoin::Txid::from_slice(&[0; 32]).unwrap(), index: 0 }),
+ funding_txo: Some(OutPoint {
+ txid: bitcoin::Txid::from_slice(&[0; 32]).unwrap(),
+ index: 0,
+ }),
channel_type: None,
short_channel_id: Some(scid),
inbound_scid_alias: None,
outbound_scid_alias: None,
channel_value_satoshis: capacity,
- user_channel_id: 0, inbound_capacity_msat: 0,
+ user_channel_id: 0,
+ inbound_capacity_msat: 0,
unspendable_punishment_reserve: None,
confirmations_required: None,
confirmations: None,
force_close_spend_delay: None,
- is_outbound: true, is_channel_ready: true,
- is_usable: true, is_public: true,
+ is_outbound: true,
+ is_channel_ready: true,
+ is_usable: true,
+ is_public: true,
balance_msat: 0,
outbound_capacity_msat: capacity.saturating_mul(1000),
next_outbound_htlc_limit_msat: capacity.saturating_mul(1000),
Some(&$first_hops_vec[..])
},
}
- }
+ };
}
macro_rules! last_hops {
let count = get_slice!(1)[0];
for _ in 0..count {
scid += 1;
- let (rnid, _) =
- node_pks.iter().skip(slice_to_be16(get_slice!(2)) as usize % node_pks.len()).next().unwrap();
+ let skip = slice_to_be16(get_slice!(2)) as usize % node_pks.len();
+ let (rnid, _) = node_pks.iter().skip(skip).next().unwrap();
$last_hops.push(RouteHint(vec![RouteHintHop {
src_node_id: *rnid,
short_channel_id: scid,
htlc_maximum_msat: None,
}]));
}
- }
+ };
}
macro_rules! find_routes {
($first_hops: expr, $node_pks: expr, $route_params: expr) => {
- let scorer = ProbabilisticScorer::new(ProbabilisticScoringDecayParameters::default(), &net_graph, &logger);
+ let scorer = ProbabilisticScorer::new(
+ ProbabilisticScoringDecayParameters::default(),
+ &net_graph,
+ &logger,
+ );
let random_seed_bytes: [u8; 32] = [get_slice!(1)[0]; 32];
for (target, ()) in $node_pks {
let final_value_msat = slice_to_be64(get_slice!(8));
let final_cltv_expiry_delta = slice_to_be32(get_slice!(4));
let route_params = $route_params(final_value_msat, final_cltv_expiry_delta, target);
- let _ = find_route(&our_pubkey, &route_params, &net_graph,
- $first_hops.map(|c| c.iter().collect::<Vec<_>>()).as_ref().map(|a| a.as_slice()),
- &logger, &scorer, &ProbabilisticScoringFeeParameters::default(), &random_seed_bytes);
+ let _ = find_route(
+ &our_pubkey,
+ &route_params,
+ &net_graph,
+ $first_hops
+ .map(|c| c.iter().collect::<Vec<_>>())
+ .as_ref()
+ .map(|a| a.as_slice()),
+ &logger,
+ &scorer,
+ &ProbabilisticScoringFeeParameters::default(),
+ &random_seed_bytes,
+ );
}
- }
+ };
}
loop {
match get_slice!(1)[0] {
0 => {
let start_len = slice_to_be16(&get_slice_nonadvancing!(2)[0..2]) as usize;
- let addr_len = slice_to_be16(&get_slice_nonadvancing!(start_len+2 + 74)[start_len+2 + 72..start_len+2 + 74]);
- if addr_len > (37+1)*4 {
+ let addr_len = slice_to_be16(
+ &get_slice_nonadvancing!(start_len + 2 + 74)
+ [start_len + 2 + 72..start_len + 2 + 74],
+ );
+ if addr_len > (37 + 1) * 4 {
return;
}
let msg = decode_msg_with_len16!(msgs::UnsignedNodeAnnouncement, 288);
let _ = net_graph.update_node_from_unsigned_announcement(&msg);
},
1 => {
- let msg = decode_msg_with_len16!(msgs::UnsignedChannelAnnouncement, 32+8+33*4);
+ let msg =
+ decode_msg_with_len16!(msgs::UnsignedChannelAnnouncement, 32 + 8 + 33 * 4);
node_pks.insert(get_pubkey_from_node_id!(msg.node_id_1), ());
node_pks.insert(get_pubkey_from_node_id!(msg.node_id_2), ());
- let _ = net_graph.update_channel_from_unsigned_announcement::
- <&FuzzChainSource<'_, '_, Out>>(&msg, &None);
+ let _ = net_graph
+ .update_channel_from_unsigned_announcement::<&FuzzChainSource<'_, '_, Out>>(
+ &msg, &None,
+ );
},
2 => {
- let msg = decode_msg_with_len16!(msgs::UnsignedChannelAnnouncement, 32+8+33*4);
+ let msg =
+ decode_msg_with_len16!(msgs::UnsignedChannelAnnouncement, 32 + 8 + 33 * 4);
node_pks.insert(get_pubkey_from_node_id!(msg.node_id_1), ());
node_pks.insert(get_pubkey_from_node_id!(msg.node_id_2), ());
- let _ = net_graph.update_channel_from_unsigned_announcement(&msg, &Some(&chain_source));
+ let _ =
+ net_graph.update_channel_from_unsigned_announcement(&msg, &Some(&chain_source));
},
3 => {
- let _ = net_graph.update_channel_unsigned(&decode_msg!(msgs::UnsignedChannelUpdate, 72));
+ let _ = net_graph
+ .update_channel_unsigned(&decode_msg!(msgs::UnsignedChannelUpdate, 72));
},
4 => {
let short_channel_id = slice_to_be64(get_slice!(8));
let first_hops = first_hops!(first_hops_vec);
let mut last_hops = Vec::new();
last_hops!(last_hops);
- find_routes!(first_hops, node_pks.iter(), |final_amt, final_delta, target: &PublicKey| {
- RouteParameters::from_payment_params_and_value(
- PaymentParameters::from_node_id(*target, final_delta)
- .with_route_hints(last_hops.clone()).unwrap(),
- final_amt)
- });
+ find_routes!(
+ first_hops,
+ node_pks.iter(),
+ |final_amt, final_delta, target: &PublicKey| {
+ RouteParameters::from_payment_params_and_value(
+ PaymentParameters::from_node_id(*target, final_delta)
+ .with_route_hints(last_hops.clone())
+ .unwrap(),
+ final_amt,
+ )
+ }
+ );
},
x => {
let mut first_hops_vec = Vec::new();
let mut last_hops_unblinded = Vec::new();
last_hops!(last_hops_unblinded);
let dummy_pk = PublicKey::from_slice(&[2; 33]).unwrap();
- let last_hops: Vec<(BlindedPayInfo, BlindedPath)> = last_hops_unblinded.into_iter().map(|hint| {
- let hop = &hint.0[0];
- let payinfo = BlindedPayInfo {
- fee_base_msat: hop.fees.base_msat,
- fee_proportional_millionths: hop.fees.proportional_millionths,
- htlc_minimum_msat: hop.htlc_minimum_msat.unwrap(),
- htlc_maximum_msat: hop.htlc_minimum_msat.unwrap().saturating_mul(100),
- cltv_expiry_delta: hop.cltv_expiry_delta,
- features: BlindedHopFeatures::empty(),
- };
- let num_blinded_hops = x % 250;
- let mut blinded_hops = Vec::new();
- for _ in 0..num_blinded_hops {
- blinded_hops.push(BlindedHop {
- blinded_node_id: dummy_pk,
- encrypted_payload: Vec::new()
- });
- }
- (payinfo, BlindedPath {
- introduction_node: IntroductionNode::NodeId(hop.src_node_id),
- blinding_point: dummy_pk,
- blinded_hops,
+ let last_hops: Vec<(BlindedPayInfo, BlindedPath)> = last_hops_unblinded
+ .into_iter()
+ .map(|hint| {
+ let hop = &hint.0[0];
+ let payinfo = BlindedPayInfo {
+ fee_base_msat: hop.fees.base_msat,
+ fee_proportional_millionths: hop.fees.proportional_millionths,
+ htlc_minimum_msat: hop.htlc_minimum_msat.unwrap(),
+ htlc_maximum_msat: hop.htlc_minimum_msat.unwrap().saturating_mul(100),
+ cltv_expiry_delta: hop.cltv_expiry_delta,
+ features: BlindedHopFeatures::empty(),
+ };
+ let num_blinded_hops = x % 250;
+ let mut blinded_hops = Vec::new();
+ for _ in 0..num_blinded_hops {
+ blinded_hops.push(BlindedHop {
+ blinded_node_id: dummy_pk,
+ encrypted_payload: Vec::new(),
+ });
+ }
+ (
+ payinfo,
+ BlindedPath {
+ introduction_node: IntroductionNode::NodeId(hop.src_node_id),
+ blinding_point: dummy_pk,
+ blinded_hops,
+ },
+ )
})
- }).collect();
+ .collect();
let mut features = Bolt12InvoiceFeatures::empty();
features.set_basic_mpp_optional();
find_routes!(first_hops, [(dummy_pk, ())].iter(), |final_amt, _, _| {
- RouteParameters::from_payment_params_and_value(PaymentParameters::blinded(last_hops.clone())
- .with_bolt12_features(features.clone()).unwrap(),
- final_amt)
+ RouteParameters::from_payment_params_and_value(
+ PaymentParameters::blinded(last_hops.clone())
+ .with_bolt12_features(features.clone())
+ .unwrap(),
+ final_amt,
+ )
});
- }
+ },
}
}
}
// licenses.
use lightning::util::logger::{Logger, Record};
-use std::sync::{Arc, Mutex};
use std::io::Write;
+use std::sync::{Arc, Mutex};
-pub trait Output : Clone + 'static {
+pub trait Output: Clone + 'static {
fn locked_write(&self, data: &[u8]);
}
}
}
-pub struct TestLogger<Out : Output> {
+pub struct TestLogger<Out: Output> {
id: String,
out: Out,
}
self.0.locked_write(data);
Ok(data.len())
}
- fn flush(&mut self) -> Result<(), std::io::Error> { Ok(()) }
+ fn flush(&mut self) -> Result<(), std::io::Error> {
+ Ok(())
+ }
}
impl<Out: Output> Logger for TestLogger<Out> {
fn log(&self, record: Record) {
- write!(LockedWriteAdapter(&self.out),
- "{:<5} {} [{} : {}] {}\n", record.level.to_string(), self.id, record.module_path, record.line, record.args)
- .unwrap();
+ write!(
+ LockedWriteAdapter(&self.out),
+ "{:<5} {} [{} : {}] {}\n",
+ record.level.to_string(),
+ self.id,
+ record.module_path,
+ record.line,
+ record.args
+ )
+ .unwrap();
}
}
use lightning::chain;
-use lightning::chain::{chainmonitor, channelmonitor};
use lightning::chain::transaction::OutPoint;
+use lightning::chain::{chainmonitor, channelmonitor};
use lightning::util::test_channel_signer::TestChannelSigner;
use std::sync::Mutex;
pub update_ret: Mutex<chain::ChannelMonitorUpdateStatus>,
}
impl chainmonitor::Persist<TestChannelSigner> for TestPersister {
- fn persist_new_channel(&self, _funding_txo: OutPoint, _data: &channelmonitor::ChannelMonitor<TestChannelSigner>) -> chain::ChannelMonitorUpdateStatus {
+ fn persist_new_channel(
+ &self, _funding_txo: OutPoint, _data: &channelmonitor::ChannelMonitor<TestChannelSigner>,
+ ) -> chain::ChannelMonitorUpdateStatus {
self.update_ret.lock().unwrap().clone()
}
- fn update_persisted_channel(&self, _funding_txo: OutPoint, _update: Option<&channelmonitor::ChannelMonitorUpdate>, _data: &channelmonitor::ChannelMonitor<TestChannelSigner>) -> chain::ChannelMonitorUpdateStatus {
+ fn update_persisted_channel(
+ &self, _funding_txo: OutPoint, _update: Option<&channelmonitor::ChannelMonitorUpdate>,
+ _data: &channelmonitor::ChannelMonitor<TestChannelSigner>,
+ ) -> chain::ChannelMonitorUpdateStatus {
self.update_ret.lock().unwrap().clone()
}
- fn archive_persisted_channel(&self, _: OutPoint) {
- }
+ fn archive_persisted_channel(&self, _: OutPoint) {}
}
//!
//! # use bitcoin::secp256k1::PublicKey;
//! # use lightning::io;
-//! # use lightning::ln::msgs::{DecodeError, LightningError};
+//! # use lightning::ln::msgs::{DecodeError, Init, LightningError};
//! # use lightning::ln::features::{InitFeatures, NodeFeatures};
//! use lightning::ln::peer_handler::CustomMessageHandler;
//! use lightning::ln::wire::{CustomMessageReader, self};
//! # fn get_and_clear_pending_msg(&self) -> Vec<(PublicKey, Self::CustomMessage)> {
//! # unimplemented!()
//! # }
+//! # fn peer_disconnected(&self, _their_node_id: &PublicKey) {
+//! # unimplemented!()
+//! # }
+//! # fn peer_connected(&self, _their_node_id: &PublicKey, _msg: &Init, _inbound: bool) -> Result<(), ()> {
+//! # unimplemented!()
+//! # }
//! # fn provided_node_features(&self) -> NodeFeatures {
//! # unimplemented!()
//! # }
//! # fn get_and_clear_pending_msg(&self) -> Vec<(PublicKey, Self::CustomMessage)> {
//! # unimplemented!()
//! # }
+//! # fn peer_disconnected(&self, _their_node_id: &PublicKey) {
+//! # unimplemented!()
+//! # }
+//! # fn peer_connected(&self, _their_node_id: &PublicKey, _msg: &Init, _inbound: bool) -> Result<(), ()> {
+//! # unimplemented!()
+//! # }
//! # fn provided_node_features(&self) -> NodeFeatures {
//! # unimplemented!()
//! # }
//! # fn get_and_clear_pending_msg(&self) -> Vec<(PublicKey, Self::CustomMessage)> {
//! # unimplemented!()
//! # }
+//! # fn peer_disconnected(&self, _their_node_id: &PublicKey) {
+//! # unimplemented!()
+//! # }
+//! # fn peer_connected(&self, _their_node_id: &PublicKey, _msg: &Init, _inbound: bool) -> Result<(), ()> {
+//! # unimplemented!()
+//! # }
//! # fn provided_node_features(&self) -> NodeFeatures {
//! # unimplemented!()
//! # }
.collect()
}
+ fn peer_disconnected(&self, their_node_id: &$crate::bitcoin::secp256k1::PublicKey) {
+ $(
+ self.$field.peer_disconnected(their_node_id);
+ )*
+ }
+
+ fn peer_connected(&self, their_node_id: &$crate::bitcoin::secp256k1::PublicKey, msg: &$crate::lightning::ln::msgs::Init, inbound: bool) -> Result<(), ()> {
+ let mut result = Ok(());
+ $(
+ if let Err(e) = self.$field.peer_connected(their_node_id, msg, inbound) {
+ result = Err(e);
+ }
+ )*
+ result
+ }
+
fn provided_node_features(&self) -> $crate::lightning::ln::features::NodeFeatures {
$crate::lightning::ln::features::NodeFeatures::empty()
$(
-use lightning::chain::{Confirm, WatchedOutput};
-use lightning::chain::channelmonitor::ANTI_REORG_DELAY;
-use bitcoin::{Txid, BlockHash, Transaction, OutPoint};
use bitcoin::block::Header;
+use bitcoin::{BlockHash, OutPoint, Transaction, Txid};
+use lightning::chain::channelmonitor::ANTI_REORG_DELAY;
+use lightning::chain::{Confirm, WatchedOutput};
-use std::collections::{HashSet, HashMap};
-
+use std::collections::{HashMap, HashSet};
+use std::ops::Deref;
// Represents the current state.
pub(crate) struct SyncState {
pending_sync: false,
}
}
- pub fn sync_unconfirmed_transactions(
- &mut self, confirmables: &Vec<&(dyn Confirm + Sync + Send)>,
- unconfirmed_txs: Vec<Txid>,
- ) {
+ pub fn sync_unconfirmed_transactions<C: Deref>(
+ &mut self, confirmables: &Vec<C>, unconfirmed_txs: Vec<Txid>,
+ ) where
+ C::Target: Confirm,
+ {
for txid in unconfirmed_txs {
for c in confirmables {
c.transaction_unconfirmed(&txid);
// If a previously-confirmed output spend is unconfirmed, re-add the watched output to
// the tracking map.
- self.outputs_spends_pending_threshold_conf.retain(|(conf_txid, _, prev_outpoint, output)| {
- if txid == *conf_txid {
- self.watched_outputs.insert(*prev_outpoint, output.clone());
- false
- } else {
- true
- }
- })
+ self.outputs_spends_pending_threshold_conf.retain(
+ |(conf_txid, _, prev_outpoint, output)| {
+ if txid == *conf_txid {
+ self.watched_outputs.insert(*prev_outpoint, output.clone());
+ false
+ } else {
+ true
+ }
+ },
+ )
}
}
- pub fn sync_confirmed_transactions(
- &mut self, confirmables: &Vec<&(dyn Confirm + Sync + Send)>,
- confirmed_txs: Vec<ConfirmedTx>
- ) {
+ pub fn sync_confirmed_transactions<C: Deref>(
+ &mut self, confirmables: &Vec<C>, confirmed_txs: Vec<ConfirmedTx>,
+ ) where
+ C::Target: Confirm,
+ {
for ctx in confirmed_txs {
for c in confirmables {
c.transactions_confirmed(
for input in &ctx.tx.input {
if let Some(output) = self.watched_outputs.remove(&input.previous_output) {
- self.outputs_spends_pending_threshold_conf.push((ctx.tx.txid(), ctx.block_height, input.previous_output, output));
+ let spent = (ctx.tx.txid(), ctx.block_height, input.previous_output, output);
+ self.outputs_spends_pending_threshold_conf.push(spent);
}
}
}
}
pub fn prune_output_spends(&mut self, cur_height: u32) {
- self.outputs_spends_pending_threshold_conf.retain(|(_, conf_height, _, _)| {
- cur_height < conf_height + ANTI_REORG_DELAY - 1
- });
+ self.outputs_spends_pending_threshold_conf
+ .retain(|(_, conf_height, _, _)| cur_height < conf_height + ANTI_REORG_DELAY - 1);
}
}
-
// A queue that is to be filled by `Filter` and drained during the next syncing round.
pub(crate) struct FilterQueue {
// Transactions that were registered via the `Filter` interface and have to be processed.
impl FilterQueue {
pub fn new() -> Self {
- Self {
- transactions: HashSet::new(),
- outputs: HashMap::new(),
- }
+ Self { transactions: HashSet::new(), outputs: HashMap::new() }
}
// Processes the transaction and output queues and adds them to the given [`SyncState`].
-use crate::common::{ConfirmedTx, SyncState, FilterQueue};
-use crate::error::{TxSyncError, InternalError};
+use crate::common::{ConfirmedTx, FilterQueue, SyncState};
+use crate::error::{InternalError, TxSyncError};
use electrum_client::Client as ElectrumClient;
use electrum_client::ElectrumApi;
use electrum_client::GetMerkleRes;
-use lightning::util::logger::Logger;
-use lightning::{log_error, log_debug, log_trace};
use lightning::chain::WatchedOutput;
use lightning::chain::{Confirm, Filter};
+use lightning::util::logger::Logger;
+use lightning::{log_debug, log_error, log_trace};
-use bitcoin::{BlockHash, Script, Transaction, Txid};
use bitcoin::block::Header;
use bitcoin::hash_types::TxMerkleNode;
-use bitcoin::hashes::Hash;
use bitcoin::hashes::sha256d::Hash as Sha256d;
+use bitcoin::hashes::Hash;
+use bitcoin::{BlockHash, Script, Transaction, Txid};
+use std::collections::HashSet;
use std::ops::Deref;
use std::sync::Mutex;
-use std::collections::HashSet;
use std::time::Instant;
/// Synchronizes LDK with a given Electrum server.
let sync_state = Mutex::new(SyncState::new());
let queue = Mutex::new(FilterQueue::new());
- Ok(Self {
- sync_state,
- queue,
- client,
- logger,
- })
+ Ok(Self { sync_state, queue, client, logger })
}
/// Synchronizes the given `confirmables` via their [`Confirm`] interface implementations. This
/// [`ChainMonitor`]: lightning::chain::chainmonitor::ChainMonitor
/// [`ChannelManager`]: lightning::ln::channelmanager::ChannelManager
/// [`Filter`]: lightning::chain::Filter
- pub fn sync(&self, confirmables: Vec<&(dyn Confirm + Sync + Send)>) -> Result<(), TxSyncError> {
+ pub fn sync<C: Deref>(&self, confirmables: Vec<C>) -> Result<(), TxSyncError>
+ where
+ C::Target: Confirm,
+ {
// This lock makes sure we're syncing once at a time.
let mut sync_state = self.sync_state.lock().unwrap();
num_unconfirmed += unconfirmed_txs.len();
sync_state.sync_unconfirmed_transactions(
&confirmables,
- unconfirmed_txs
+ unconfirmed_txs,
);
- }
+ },
Ok(true) => {
log_debug!(self.logger,
"Encountered inconsistency during transaction sync, restarting.");
sync_state.pending_sync = true;
continue;
- }
+ },
Err(err) => {
// (Semi-)permanent failure, retry later.
log_error!(self.logger,
);
sync_state.pending_sync = true;
return Err(TxSyncError::from(err));
- }
+ },
}
},
Err(err) => {
);
sync_state.pending_sync = true;
return Err(TxSyncError::from(err));
- }
+ },
}
// Update the best block.
match self.check_update_tip(&mut tip_header, &mut tip_height) {
Ok(false) => {
num_confirmed += confirmed_txs.len();
- sync_state.sync_confirmed_transactions(
- &confirmables,
- confirmed_txs
- );
- }
+ sync_state
+ .sync_confirmed_transactions(&confirmables, confirmed_txs);
+ },
Ok(true) => {
log_debug!(self.logger,
"Encountered inconsistency during transaction sync, restarting.");
sync_state.pending_sync = true;
continue;
- }
+ },
Err(err) => {
// (Semi-)permanent failure, retry later.
log_error!(self.logger,
);
sync_state.pending_sync = true;
return Err(TxSyncError::from(err));
- }
+ },
}
- }
+ },
Err(InternalError::Inconsistency) => {
// Immediately restart syncing when we encounter any inconsistencies.
- log_debug!(self.logger,
- "Encountered inconsistency during transaction sync, restarting.");
+ log_debug!(
+ self.logger,
+ "Encountered inconsistency during transaction sync, restarting."
+ );
sync_state.pending_sync = true;
continue;
- }
+ },
Err(err) => {
// (Semi-)permanent failure, retry later.
log_error!(self.logger,
);
sync_state.pending_sync = true;
return Err(TxSyncError::from(err));
- }
+ },
}
sync_state.last_sync_hash = Some(tip_header.block_hash());
sync_state.pending_sync = false;
}
}
#[cfg(feature = "time")]
- log_debug!(self.logger,
+ log_debug!(
+ self.logger,
"Finished transaction sync at tip {} in {}ms: {} confirmed, {} unconfirmed.",
- tip_header.block_hash(), start_time.elapsed().as_millis(), num_confirmed,
- num_unconfirmed);
+ tip_header.block_hash(),
+ start_time.elapsed().as_millis(),
+ num_confirmed,
+ num_unconfirmed
+ );
#[cfg(not(feature = "time"))]
- log_debug!(self.logger,
+ log_debug!(
+ self.logger,
"Finished transaction sync at tip {}: {} confirmed, {} unconfirmed.",
- tip_header.block_hash(), num_confirmed, num_unconfirmed);
+ tip_header.block_hash(),
+ num_confirmed,
+ num_unconfirmed
+ );
Ok(())
}
- fn check_update_tip(&self, cur_tip_header: &mut Header, cur_tip_height: &mut u32)
- -> Result<bool, InternalError>
- {
+ fn check_update_tip(
+ &self, cur_tip_header: &mut Header, cur_tip_height: &mut u32,
+ ) -> Result<bool, InternalError> {
let check_notification = self.client.block_headers_subscribe()?;
let check_tip_hash = check_notification.header.block_hash();
fn get_confirmed_transactions(
&self, sync_state: &SyncState,
) -> Result<Vec<ConfirmedTx>, InternalError> {
-
// First, check the confirmation status of registered transactions as well as the
// status of dependent transactions of registered outputs.
let mut confirmed_txs: Vec<ConfirmedTx> = Vec::new();
let mut watched_script_pubkeys = Vec::with_capacity(
- sync_state.watched_transactions.len() + sync_state.watched_outputs.len());
+ sync_state.watched_transactions.len() + sync_state.watched_outputs.len(),
+ );
let mut watched_txs = Vec::with_capacity(sync_state.watched_transactions.len());
for txid in &sync_state.watched_transactions {
log_error!(self.logger, "Failed due to retrieving invalid tx data.");
return Err(InternalError::Failed);
}
- }
+ },
Err(electrum_client::Error::Protocol(_)) => {
// We couldn't find the tx, do nothing.
- }
+ },
Err(e) => {
log_error!(self.logger, "Failed to look up transaction {}: {}.", txid, e);
return Err(InternalError::Failed);
- }
+ },
}
}
if confirmed_txs.iter().any(|ctx| ctx.txid == **txid) {
continue;
}
- let mut filtered_history = script_history.iter().filter(|h| h.tx_hash == **txid);
- if let Some(history) = filtered_history.next()
- {
+ let mut filtered_history =
+ script_history.iter().filter(|h| h.tx_hash == **txid);
+ if let Some(history) = filtered_history.next() {
let prob_conf_height = history.height as u32;
let confirmed_tx = self.get_confirmed_tx(tx, prob_conf_height)?;
confirmed_txs.push(confirmed_tx);
debug_assert!(filtered_history.next().is_none());
}
- for (watched_output, script_history) in sync_state.watched_outputs.values()
- .zip(output_results)
+ for (watched_output, script_history) in
+ sync_state.watched_outputs.values().zip(output_results)
{
for possible_output_spend in script_history {
if possible_output_spend.height <= 0 {
Ok(tx) => {
let mut is_spend = false;
for txin in &tx.input {
- let watched_outpoint = watched_output.outpoint
- .into_bitcoin_outpoint();
+ let watched_outpoint =
+ watched_output.outpoint.into_bitcoin_outpoint();
if txin.previous_output == watched_outpoint {
is_spend = true;
break;
let prob_conf_height = possible_output_spend.height as u32;
let confirmed_tx = self.get_confirmed_tx(&tx, prob_conf_height)?;
confirmed_txs.push(confirmed_tx);
- }
+ },
Err(e) => {
- log_trace!(self.logger,
+ log_trace!(
+ self.logger,
"Inconsistency: Tx {} was unconfirmed during syncing: {}",
- txid, e);
+ txid,
+ e
+ );
return Err(InternalError::Inconsistency);
- }
+ },
}
}
}
- }
+ },
Err(e) => {
log_error!(self.logger, "Failed to look up script histories: {}.", e);
return Err(InternalError::Failed);
- }
+ },
}
// Sort all confirmed transactions first by block height, then by in-block
Ok(confirmed_txs)
}
- fn get_unconfirmed_transactions(
- &self, confirmables: &Vec<&(dyn Confirm + Sync + Send)>,
- ) -> Result<Vec<Txid>, InternalError> {
+ fn get_unconfirmed_transactions<C: Deref>(
+ &self, confirmables: &Vec<C>,
+ ) -> Result<Vec<Txid>, InternalError>
+ where
+ C::Target: Confirm,
+ {
// Query the interface for relevant txids and check whether the relevant blocks are still
// in the best chain, mark them unconfirmed otherwise
let relevant_txids = confirmables
Ok(unconfirmed_txs)
}
- fn get_confirmed_tx(&self, tx: &Transaction, prob_conf_height: u32)
- -> Result<ConfirmedTx, InternalError>
- {
+ fn get_confirmed_tx(
+ &self, tx: &Transaction, prob_conf_height: u32,
+ ) -> Result<ConfirmedTx, InternalError> {
let txid = tx.txid();
match self.client.transaction_get_merkle(&txid, prob_conf_height as usize) {
Ok(merkle_res) => {
match self.client.block_header(prob_conf_height as usize) {
Ok(block_header) => {
let pos = merkle_res.pos;
- if !self.validate_merkle_proof(&txid,
- &block_header.merkle_root, merkle_res)?
- {
- log_trace!(self.logger,
+ if !self.validate_merkle_proof(
+ &txid,
+ &block_header.merkle_root,
+ merkle_res,
+ )? {
+ log_trace!(
+ self.logger,
"Inconsistency: Block {} was unconfirmed during syncing.",
- block_header.block_hash());
+ block_header.block_hash()
+ );
return Err(InternalError::Inconsistency);
}
let confirmed_tx = ConfirmedTx {
tx: tx.clone(),
txid,
- block_header, block_height: prob_conf_height,
+ block_header,
+ block_height: prob_conf_height,
pos,
};
Ok(confirmed_tx)
- }
+ },
Err(e) => {
- log_error!(self.logger,
+ log_error!(
+ self.logger,
"Failed to retrieve block header for height {}: {}.",
- prob_conf_height, e);
+ prob_conf_height,
+ e
+ );
Err(InternalError::Failed)
- }
+ },
}
- }
+ },
Err(e) => {
- log_trace!(self.logger,
+ log_trace!(
+ self.logger,
"Inconsistency: Tx {} was unconfirmed during syncing: {}",
- txid, e);
+ txid,
+ e
+ );
Err(InternalError::Inconsistency)
- }
+ },
}
}
&self.client
}
- fn validate_merkle_proof(&self, txid: &Txid, merkle_root: &TxMerkleNode,
- merkle_res: GetMerkleRes) -> Result<bool, InternalError>
- {
+ fn validate_merkle_proof(
+ &self, txid: &Txid, merkle_root: &TxMerkleNode, merkle_res: GetMerkleRes,
+ ) -> Result<bool, InternalError> {
let mut index = merkle_res.pos;
let mut cur = txid.to_raw_hash();
for mut bytes in merkle_res.merkle {
bytes.reverse();
// unwrap() safety: `bytes` has len 32 so `from_slice` can never fail.
let next_hash = Sha256d::from_slice(&bytes).unwrap();
- let (left, right) = if index % 2 == 0 {
- (cur, next_hash)
- } else {
- (next_hash, cur)
- };
+ let (left, right) = if index % 2 == 0 { (cur, next_hash) } else { (next_hash, cur) };
let data = [&left[..], &right[..]].concat();
cur = Sha256d::hash(&data);
Self::Failed => write!(f, "Failed to conduct transaction sync."),
Self::Inconsistency => {
write!(f, "Encountered an inconsistency during transaction sync.")
- }
+ },
}
}
}
-use crate::error::{TxSyncError, InternalError};
-use crate::common::{SyncState, FilterQueue, ConfirmedTx};
+use crate::common::{ConfirmedTx, FilterQueue, SyncState};
+use crate::error::{InternalError, TxSyncError};
-use lightning::util::logger::Logger;
-use lightning::{log_error, log_debug, log_trace};
use lightning::chain::WatchedOutput;
use lightning::chain::{Confirm, Filter};
+use lightning::util::logger::Logger;
+use lightning::{log_debug, log_error, log_trace};
use bitcoin::{BlockHash, Script, Txid};
-use esplora_client::Builder;
-#[cfg(feature = "async-interface")]
-use esplora_client::r#async::AsyncClient;
#[cfg(not(feature = "async-interface"))]
use esplora_client::blocking::BlockingClient;
+#[cfg(feature = "async-interface")]
+use esplora_client::r#async::AsyncClient;
+use esplora_client::Builder;
-use std::collections::HashSet;
use core::ops::Deref;
+use std::collections::HashSet;
/// Synchronizes LDK with a given [`Esplora`] server.
///
pub fn from_client(client: EsploraClientType, logger: L) -> Self {
let sync_state = MutexType::new(SyncState::new());
let queue = std::sync::Mutex::new(FilterQueue::new());
- Self {
- sync_state,
- queue,
- client,
- logger,
- }
+ Self { sync_state, queue, client, logger }
}
/// Synchronizes the given `confirmables` via their [`Confirm`] interface implementations. This
/// [`ChannelManager`]: lightning::ln::channelmanager::ChannelManager
/// [`Filter`]: lightning::chain::Filter
#[maybe_async]
- pub fn sync(&self, confirmables: Vec<&(dyn Confirm + Sync + Send)>) -> Result<(), TxSyncError> {
+ pub fn sync<C: Deref>(&self, confirmables: Vec<C>) -> Result<(), TxSyncError>
+ where
+ C::Target: Confirm,
+ {
// This lock makes sure we're syncing once at a time.
#[cfg(not(feature = "async-interface"))]
let mut sync_state = self.sync_state.lock().unwrap();
num_unconfirmed += unconfirmed_txs.len();
sync_state.sync_unconfirmed_transactions(
&confirmables,
- unconfirmed_txs
+ unconfirmed_txs,
);
- }
+ },
Err(err) => {
// (Semi-)permanent failure, retry later.
log_error!(self.logger,
);
sync_state.pending_sync = true;
return Err(TxSyncError::from(err));
- }
+ },
}
},
Err(err) => {
);
sync_state.pending_sync = true;
return Err(TxSyncError::from(err));
- }
+ },
}
- match maybe_await!(self.sync_best_block_updated(&confirmables, &mut sync_state, &tip_hash)) {
- Ok(()) => {}
+ match maybe_await!(self.sync_best_block_updated(
+ &confirmables,
+ &mut sync_state,
+ &tip_hash
+ )) {
+ Ok(()) => {},
Err(InternalError::Inconsistency) => {
// Immediately restart syncing when we encounter any inconsistencies.
- log_debug!(self.logger, "Encountered inconsistency during transaction sync, restarting.");
+ log_debug!(
+ self.logger,
+ "Encountered inconsistency during transaction sync, restarting."
+ );
sync_state.pending_sync = true;
continue;
- }
+ },
Err(err) => {
// (Semi-)permanent failure, retry later.
log_error!(self.logger,
);
sync_state.pending_sync = true;
return Err(TxSyncError::from(err));
- }
+ },
}
}
continue;
}
num_confirmed += confirmed_txs.len();
- sync_state.sync_confirmed_transactions(
- &confirmables,
- confirmed_txs
- );
- }
+ sync_state
+ .sync_confirmed_transactions(&confirmables, confirmed_txs);
+ },
Err(err) => {
// (Semi-)permanent failure, retry later.
log_error!(self.logger,
);
sync_state.pending_sync = true;
return Err(TxSyncError::from(err));
- }
+ },
}
- }
+ },
Err(InternalError::Inconsistency) => {
// Immediately restart syncing when we encounter any inconsistencies.
- log_debug!(self.logger, "Encountered inconsistency during transaction sync, restarting.");
+ log_debug!(
+ self.logger,
+ "Encountered inconsistency during transaction sync, restarting."
+ );
sync_state.pending_sync = true;
continue;
- }
+ },
Err(err) => {
// (Semi-)permanent failure, retry later.
log_error!(self.logger,
);
sync_state.pending_sync = true;
return Err(TxSyncError::from(err));
- }
+ },
}
sync_state.last_sync_hash = Some(tip_hash);
sync_state.pending_sync = false;
}
}
#[cfg(feature = "time")]
- log_debug!(self.logger, "Finished transaction sync at tip {} in {}ms: {} confirmed, {} unconfirmed.",
- tip_hash, start_time.elapsed().as_millis(), num_confirmed, num_unconfirmed);
+ log_debug!(
+ self.logger,
+ "Finished transaction sync at tip {} in {}ms: {} confirmed, {} unconfirmed.",
+ tip_hash,
+ start_time.elapsed().as_millis(),
+ num_confirmed,
+ num_unconfirmed
+ );
#[cfg(not(feature = "time"))]
- log_debug!(self.logger, "Finished transaction sync at tip {}: {} confirmed, {} unconfirmed.",
- tip_hash, num_confirmed, num_unconfirmed);
+ log_debug!(
+ self.logger,
+ "Finished transaction sync at tip {}: {} confirmed, {} unconfirmed.",
+ tip_hash,
+ num_confirmed,
+ num_unconfirmed
+ );
Ok(())
}
#[maybe_async]
- fn sync_best_block_updated(
- &self, confirmables: &Vec<&(dyn Confirm + Sync + Send)>, sync_state: &mut SyncState, tip_hash: &BlockHash,
- ) -> Result<(), InternalError> {
-
+ fn sync_best_block_updated<C: Deref>(
+ &self, confirmables: &Vec<C>, sync_state: &mut SyncState, tip_hash: &BlockHash,
+ ) -> Result<(), InternalError>
+ where
+ C::Target: Confirm,
+ {
// Inform the interface of the new block.
let tip_header = maybe_await!(self.client.get_header_by_hash(tip_hash))?;
let tip_status = maybe_await!(self.client.get_block_status(&tip_hash))?;
fn get_confirmed_transactions(
&self, sync_state: &SyncState,
) -> Result<Vec<ConfirmedTx>, InternalError> {
-
// First, check the confirmation status of registered transactions as well as the
// status of dependent transactions of registered outputs.
}
for (_, output) in &sync_state.watched_outputs {
- if let Some(output_status) = maybe_await!(self.client
+ if let Some(output_status) = maybe_await!(self
+ .client
.get_output_status(&output.outpoint.txid, output.outpoint.index as u64))?
{
if let Some(spending_txid) = output_status.txid {
}
}
- if let Some(confirmed_tx) = maybe_await!(self
- .get_confirmed_tx(
- spending_txid,
- spending_tx_status.block_hash,
- spending_tx_status.block_height,
- ))?
- {
+ if let Some(confirmed_tx) = maybe_await!(self.get_confirmed_tx(
+ spending_txid,
+ spending_tx_status.block_hash,
+ spending_tx_status.block_height,
+ ))? {
confirmed_txs.push(confirmed_tx);
}
}
let block_hash = block_header.block_hash();
if let Some(expected_block_hash) = expected_block_hash {
if expected_block_hash != block_hash {
- log_trace!(self.logger, "Inconsistency: Tx {} expected in block {}, but is confirmed in {}", txid, expected_block_hash, block_hash);
+ log_trace!(
+ self.logger,
+ "Inconsistency: Tx {} expected in block {}, but is confirmed in {}",
+ txid,
+ expected_block_hash,
+ block_hash
+ );
return Err(InternalError::Inconsistency);
}
}
} else {
// If any previously-confirmed block suddenly is no longer confirmed, we found
// an inconsistency and should start over.
- log_trace!(self.logger, "Inconsistency: Tx {} was unconfirmed during syncing.", txid);
+ log_trace!(
+ self.logger,
+ "Inconsistency: Tx {} was unconfirmed during syncing.",
+ txid
+ );
return Err(InternalError::Inconsistency);
}
}
}
#[maybe_async]
- fn get_unconfirmed_transactions(
- &self, confirmables: &Vec<&(dyn Confirm + Sync + Send)>,
- ) -> Result<Vec<Txid>, InternalError> {
+ fn get_unconfirmed_transactions<C: Deref>(
+ &self, confirmables: &Vec<C>,
+ ) -> Result<Vec<Txid>, InternalError>
+ where
+ C::Target: Confirm,
+ {
// Query the interface for relevant txids and check whether the relevant blocks are still
// in the best chain, mark them unconfirmed otherwise
let relevant_txids = confirmables
#[cfg(not(feature = "async-interface"))]
type EsploraClientType = BlockingClient;
-
impl<L: Deref> Filter for EsploraSyncClient<L>
where
L::Target: Logger,
#![deny(rustdoc::broken_intra_doc_links)]
#![deny(rustdoc::private_intra_doc_links)]
-
#![deny(missing_docs)]
#![deny(unsafe_code)]
-
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
#[cfg(any(feature = "esplora-blocking", feature = "esplora-async"))]
#[cfg(any(feature = "esplora-blocking", feature = "esplora-async", feature = "electrum"))]
pub use error::TxSyncError;
-#[cfg(any(feature = "esplora-blocking", feature = "esplora-async"))]
-pub use esplora::EsploraSyncClient;
#[cfg(feature = "electrum")]
pub use electrum::ElectrumSyncClient;
+#[cfg(any(feature = "esplora-blocking", feature = "esplora-async"))]
+pub use esplora::EsploraSyncClient;
-#![cfg(all(not(target_os = "windows"), any(feature = "esplora-blocking", feature = "esplora-async", feature = "electrum")))]
+#![cfg(all(
+ not(target_os = "windows"),
+ any(feature = "esplora-blocking", feature = "esplora-async", feature = "electrum")
+))]
-#[cfg(any(feature = "esplora-blocking", feature = "esplora-async"))]
-use lightning_transaction_sync::EsploraSyncClient;
-#[cfg(feature = "electrum")]
-use lightning_transaction_sync::ElectrumSyncClient;
-use lightning::chain::{Confirm, Filter, WatchedOutput};
use lightning::chain::transaction::{OutPoint, TransactionData};
+use lightning::chain::{Confirm, Filter, WatchedOutput};
use lightning::util::test_utils::TestLogger;
+#[cfg(feature = "electrum")]
+use lightning_transaction_sync::ElectrumSyncClient;
+#[cfg(any(feature = "esplora-blocking", feature = "esplora-async"))]
+use lightning_transaction_sync::EsploraSyncClient;
-use electrsd::{bitcoind, bitcoind::BitcoinD, ElectrsD};
-use bitcoin::{Amount, Txid, BlockHash};
+use bdk_macros::maybe_await;
use bitcoin::blockdata::block::Header;
use bitcoin::blockdata::constants::genesis_block;
use bitcoin::network::Network;
-use electrsd::bitcoind::bitcoincore_rpc::bitcoincore_rpc_json::AddressType;
+use bitcoin::{Amount, BlockHash, Txid};
use bitcoind::bitcoincore_rpc::RpcApi;
-use bdk_macros::maybe_await;
+use electrsd::bitcoind::bitcoincore_rpc::bitcoincore_rpc_json::AddressType;
+use electrsd::{bitcoind, bitcoind::BitcoinD, ElectrsD};
+use std::collections::{HashMap, HashSet};
use std::env;
use std::sync::Mutex;
use std::time::Duration;
-use std::collections::{HashMap, HashSet};
pub fn setup_bitcoind_and_electrsd() -> (BitcoinD, ElectrsD) {
let bitcoind_exe =
// it didn't. Since we can't proceed without subscribing, we try again after a delay
// and panic if it still fails.
std::thread::sleep(Duration::from_secs(1));
- electrsd.client.block_headers_subscribe_raw().expect("failed to subscribe to block headers")
- }
+ electrsd
+ .client
+ .block_headers_subscribe_raw()
+ .expect("failed to subscribe to block headers")
+ },
};
loop {
if header.height >= min_height {
None if delay.as_millis() < 512 => {
delay = delay.mul_f32(2.0);
tries += 1;
- }
+ },
None if tries == 10 => panic!("Exceeded our maximum wait time."),
None => tries += 1,
}
let block_hash = header.block_hash();
self.confirmed_txs.lock().unwrap().insert(txid, (block_hash, height));
self.unconfirmed_txs.lock().unwrap().remove(&txid);
- self.events.lock().unwrap().push(TestConfirmableEvent::Confirmed(txid, block_hash, height));
+ let event = TestConfirmableEvent::Confirmed(txid, block_hash, height);
+ self.events.lock().unwrap().push(event);
}
}
fn best_block_updated(&self, header: &Header, height: u32) {
let block_hash = header.block_hash();
*self.best_block.lock().unwrap() = (block_hash, height);
- self.events.lock().unwrap().push(TestConfirmableEvent::BestBlockUpdated(block_hash, height));
+ let event = TestConfirmableEvent::BestBlockUpdated(block_hash, height);
+ self.events.lock().unwrap().push(event);
}
fn get_relevant_txids(&self) -> Vec<(Txid, u32, Option<BlockHash>)> {
- self.confirmed_txs.lock().unwrap().iter().map(|(&txid, (hash, height))| (txid, *height, Some(*hash))).collect::<Vec<_>>()
+ let lock = self.confirmed_txs.lock().unwrap();
+ lock.iter().map(|(&txid, (hash, height))| (txid, *height, Some(*hash))).collect()
}
}
assert_eq!(events.len(), 1);
// Check registered confirmed transactions are marked confirmed
- let new_address = $bitcoind.client.get_new_address(Some("test"),
- Some(AddressType::Legacy)).unwrap().assume_checked();
- let txid = $bitcoind.client.send_to_address(&new_address, Amount::from_sat(5000), None, None,
- None, None, None, None).unwrap();
- let second_txid = $bitcoind.client.send_to_address(&new_address, Amount::from_sat(5000), None,
- None, None, None, None, None).unwrap();
+ let new_address = $bitcoind
+ .client
+ .get_new_address(Some("test"), Some(AddressType::Legacy))
+ .unwrap()
+ .assume_checked();
+ let txid = $bitcoind
+ .client
+ .send_to_address(
+ &new_address,
+ Amount::from_sat(5000),
+ None,
+ None,
+ None,
+ None,
+ None,
+ None,
+ )
+ .unwrap();
+ let second_txid = $bitcoind
+ .client
+ .send_to_address(
+ &new_address,
+ Amount::from_sat(5000),
+ None,
+ None,
+ None,
+ None,
+ None,
+ None,
+ )
+ .unwrap();
$tx_sync.register_tx(&txid, &new_address.payload().script_pubkey());
maybe_await!($tx_sync.sync(vec![&$confirmable])).unwrap();
let block_hash = tx_res.info.blockhash.unwrap();
let tx = tx_res.transaction().unwrap();
let prev_outpoint = tx.input.first().unwrap().previous_output;
- let prev_tx = $bitcoind.client.get_transaction(&prev_outpoint.txid, None).unwrap().transaction()
+ let prev_tx = $bitcoind
+ .client
+ .get_transaction(&prev_outpoint.txid, None)
+ .unwrap()
+ .transaction()
.unwrap();
let prev_script_pubkey = prev_tx.output[prev_outpoint.vout as usize].script_pubkey.clone();
let output = WatchedOutput {
block_hash: Some(block_hash),
outpoint: OutPoint { txid: prev_outpoint.txid, index: prev_outpoint.vout as u16 },
- script_pubkey: prev_script_pubkey
+ script_pubkey: prev_script_pubkey,
};
$tx_sync.register_output(output);
},
commitment_txid: htlc.commitment_txid,
per_commitment_number: htlc.per_commitment_number,
- per_commitment_point: self.onchain_tx_handler.signer.get_per_commitment_point(
- htlc.per_commitment_number, &self.onchain_tx_handler.secp_ctx,
- ),
+ per_commitment_point: htlc.per_commitment_point,
feerate_per_kw: 0,
htlc: htlc.htlc,
preimage: htlc.preimage,
use bitcoin::hashes::{Hash, HashEngine};
use bitcoin::hashes::sha256::Hash as Sha256;
use bitcoin::hash_types::{Txid, BlockHash};
+use bitcoin::secp256k1::PublicKey;
use bitcoin::secp256k1::{Secp256k1, ecdsa::Signature};
use bitcoin::secp256k1;
pub(crate) htlc: HTLCOutputInCommitment,
pub(crate) preimage: Option<PaymentPreimage>,
pub(crate) counterparty_sig: Signature,
+ pub(crate) per_commitment_point: PublicKey,
}
// Represents the different types of claims for which events are yielded externally to satisfy said
htlc: htlc.clone(),
preimage: *preimage,
counterparty_sig: counterparty_htlc_sig,
+ per_commitment_point: trusted_tx.per_commitment_point(),
}
})
};
FundingBatchClosure,
/// One of our HTLCs timed out in a channel, causing us to force close the channel.
HTLCsTimedOut,
+ /// Our peer provided a feerate which violated our required minimum (fetched from our
+ /// [`FeeEstimator`] either as [`ConfirmationTarget::MinAllowedAnchorChannelRemoteFee`] or
+ /// [`ConfirmationTarget::MinAllowedNonAnchorChannelRemoteFee`]).
+ ///
+ /// [`FeeEstimator`]: crate::chain::chaininterface::FeeEstimator
+ /// [`ConfirmationTarget::MinAllowedAnchorChannelRemoteFee`]: crate::chain::chaininterface::ConfirmationTarget::MinAllowedAnchorChannelRemoteFee
+ /// [`ConfirmationTarget::MinAllowedNonAnchorChannelRemoteFee`]: crate::chain::chaininterface::ConfirmationTarget::MinAllowedNonAnchorChannelRemoteFee
+ PeerFeerateTooLow {
+ /// The feerate on our channel set by our peer.
+ peer_feerate_sat_per_kw: u32,
+ /// The required feerate we enforce, from our [`FeeEstimator`].
+ ///
+ /// [`FeeEstimator`]: crate::chain::chaininterface::FeeEstimator
+ required_feerate_sat_per_kw: u32,
+ },
}
impl core::fmt::Display for ClosureReason {
ClosureReason::CounterpartyCoopClosedUnfundedChannel => f.write_str("the peer requested the unfunded channel be closed"),
ClosureReason::FundingBatchClosure => f.write_str("another channel in the same funding batch closed"),
ClosureReason::HTLCsTimedOut => f.write_str("htlcs on the channel timed out"),
+ ClosureReason::PeerFeerateTooLow { peer_feerate_sat_per_kw, required_feerate_sat_per_kw } =>
+ f.write_fmt(format_args!(
+ "peer provided a feerate ({} sat/kw) which was below our lower bound ({} sat/kw)",
+ peer_feerate_sat_per_kw, required_feerate_sat_per_kw,
+ )),
}
}
}
(17, CounterpartyInitiatedCooperativeClosure) => {},
(19, LocallyInitiatedCooperativeClosure) => {},
(21, HTLCsTimedOut) => {},
+ (23, PeerFeerateTooLow) => {
+ (0, peer_feerate_sat_per_kw, required),
+ (2, required_feerate_sat_per_kw, required),
+ },
);
/// Intended destination of a failed HTLC as indicated in [`Event::HTLCHandlingFailed`].
pub(super) enum ChannelError {
	Ignore(String),
	Warn(String),
-	Close(String),
+	/// The error message to send to our peer, paired with the [`ClosureReason`] that
+	/// is surfaced (e.g. in the channel-closed event) when this error closes the channel.
+	Close((String, ClosureReason)),
}
impl fmt::Debug for ChannelError {
match self {
&ChannelError::Ignore(ref e) => write!(f, "Ignore : {}", e),
&ChannelError::Warn(ref e) => write!(f, "Warn : {}", e),
- &ChannelError::Close(ref e) => write!(f, "Close : {}", e),
+ &ChannelError::Close((ref e, _)) => write!(f, "Close : {}", e),
}
}
}
match self {
&ChannelError::Ignore(ref e) => write!(f, "{}", e),
&ChannelError::Warn(ref e) => write!(f, "{}", e),
- &ChannelError::Close(ref e) => write!(f, "{}", e),
+ &ChannelError::Close((ref e, _)) => write!(f, "{}", e),
}
}
}
+impl ChannelError {
+	/// Convenience constructor for the common case: builds a `ChannelError::Close`
+	/// whose `ClosureReason::ProcessingError` carries the same message as the error
+	/// itself, so call sites don't have to duplicate the string by hand.
+	/// (The clone is needed because the message is stored in both tuple members.)
+	pub(super) fn close(err: String) -> Self {
+		ChannelError::Close((err.clone(), ClosureReason::ProcessingError { err }))
+	}
+}
+
pub(super) struct WithChannelContext<'a, L: Deref> where L::Target: Logger {
pub logger: &'a L,
pub peer_id: Option<PublicKey>,
($res: expr, $err: expr) => {
match $res {
Ok(thing) => thing,
- Err(_) => return Err(ChannelError::Close($err)),
+ Err(_) => return Err(ChannelError::close($err)),
}
};
}
pub(crate) channel_funding_txo: Option<OutPoint>,
}
+/// Tracks the transaction number, along with current and next commitment points.
+/// This consolidates the logic to advance our commitment number and request new
+/// commitment points from our signer.
+///
+/// Note: commitment transaction numbers count *down* from `INITIAL_COMMITMENT_NUMBER`
+/// (see `new`/`advance` below, which subtract 1 to move to the next commitment).
+#[derive(Debug, Copy, Clone)]
+enum HolderCommitmentPoint {
+	// TODO: add a variant for before our first commitment point is retrieved
+	/// We've advanced our commitment number and are waiting on the next commitment point.
+	/// Until the `get_per_commitment_point` signer method becomes async, this variant
+	/// will not be used.
+	PendingNext { transaction_number: u64, current: PublicKey },
+	/// Our current commitment point is ready, we've cached our next point,
+	/// and we are not pending a new one.
+	Available { transaction_number: u64, current: PublicKey, next: PublicKey },
+}
+
+impl HolderCommitmentPoint {
+	/// Builds the initial state: eagerly fetches the first two commitment points
+	/// (for `INITIAL_COMMITMENT_NUMBER` and the one after it) from the signer,
+	/// so we start out in the `Available` variant.
+	pub fn new<SP: Deref>(signer: &ChannelSignerType<SP>, secp_ctx: &Secp256k1<secp256k1::All>) -> Self
+		where SP::Target: SignerProvider
+	{
+		HolderCommitmentPoint::Available {
+			transaction_number: INITIAL_COMMITMENT_NUMBER,
+			current: signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER, secp_ctx),
+			next: signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, secp_ctx),
+		}
+	}
+
+	/// Returns true when the next commitment point is cached (`Available`), i.e.
+	/// we are not waiting on the signer for it.
+	pub fn is_available(&self) -> bool {
+		if let HolderCommitmentPoint::Available { .. } = self { true } else { false }
+	}
+
+	/// The current holder commitment transaction number (decreases as we advance).
+	pub fn transaction_number(&self) -> u64 {
+		match self {
+			HolderCommitmentPoint::PendingNext { transaction_number, .. } => *transaction_number,
+			HolderCommitmentPoint::Available { transaction_number, .. } => *transaction_number,
+		}
+	}
+
+	/// The per-commitment point for the current transaction number.
+	pub fn current_point(&self) -> PublicKey {
+		match self {
+			HolderCommitmentPoint::PendingNext { current, .. } => *current,
+			HolderCommitmentPoint::Available { current, .. } => *current,
+		}
+	}
+
+	/// The cached next per-commitment point, or `None` while one is pending.
+	pub fn next_point(&self) -> Option<PublicKey> {
+		match self {
+			HolderCommitmentPoint::PendingNext { .. } => None,
+			HolderCommitmentPoint::Available { next, .. } => Some(*next),
+		}
+	}
+
+	/// Advances to the next commitment number in two steps: first the cached `next`
+	/// point becomes `current` (entering `PendingNext` with the decremented number),
+	/// then the following point is fetched from the signer. While
+	/// `get_per_commitment_point` is synchronous the second step always completes,
+	/// so this always ends in the `Available` variant.
+	pub fn advance<SP: Deref, L: Deref>(&mut self, signer: &ChannelSignerType<SP>, secp_ctx: &Secp256k1<secp256k1::All>, logger: &L)
+		where SP::Target: SignerProvider, L::Target: Logger
+	{
+		if let HolderCommitmentPoint::Available { transaction_number, next, .. } = self {
+			*self = HolderCommitmentPoint::PendingNext {
+				transaction_number: *transaction_number - 1,
+				current: *next,
+			};
+		}
+
+		// Resolve the pending state immediately by requesting the next point.
+		if let HolderCommitmentPoint::PendingNext { transaction_number, current } = self {
+			let next = signer.as_ref().get_per_commitment_point(*transaction_number - 1, secp_ctx);
+			log_trace!(logger, "Retrieved next per-commitment point {}", *transaction_number - 1);
+			*self = HolderCommitmentPoint::Available { transaction_number: *transaction_number, current: *current, next };
+		}
+	}
+}
+
/// If the majority of the channels funds are to the fundee and the initiator holds only just
/// enough funds to cover their reserve value, channels are at risk of getting "stuck". Because the
/// initiator controls the feerate, if they then go to increase the channel fee, they may have no
// generation start at 0 and count up...this simplifies some parts of implementation at the
// cost of others, but should really just be changed.
- cur_holder_commitment_transaction_number: u64,
+ holder_commitment_point: HolderCommitmentPoint,
cur_counterparty_commitment_transaction_number: u64,
value_to_self_msat: u64, // Excluding all pending_htlcs, fees, and anchor outputs
pending_inbound_htlcs: Vec<InboundHTLCOutput>,
let pubkeys = holder_signer.pubkeys().clone();
if config.channel_handshake_config.our_to_self_delay < BREAKDOWN_TIMEOUT {
- return Err(ChannelError::Close(format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risks. It must be greater than {}", config.channel_handshake_config.our_to_self_delay, BREAKDOWN_TIMEOUT)));
+ return Err(ChannelError::close(format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risks. It must be greater than {}", config.channel_handshake_config.our_to_self_delay, BREAKDOWN_TIMEOUT)));
}
// Check sanity of message fields:
if channel_value_satoshis > config.channel_handshake_limits.max_funding_satoshis {
- return Err(ChannelError::Close(format!(
+ return Err(ChannelError::close(format!(
"Per our config, funding must be at most {}. It was {}. Peer contribution: {}. Our contribution: {}",
config.channel_handshake_limits.max_funding_satoshis, channel_value_satoshis,
open_channel_fields.funding_satoshis, our_funding_satoshis)));
}
if channel_value_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
- return Err(ChannelError::Close(format!("Funding must be smaller than the total bitcoin supply. It was {}", channel_value_satoshis)));
+ return Err(ChannelError::close(format!("Funding must be smaller than the total bitcoin supply. It was {}", channel_value_satoshis)));
}
if msg_channel_reserve_satoshis > channel_value_satoshis {
- return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must be no greater than channel_value_satoshis: {}", msg_channel_reserve_satoshis, channel_value_satoshis)));
+ return Err(ChannelError::close(format!("Bogus channel_reserve_satoshis ({}). Must be no greater than channel_value_satoshis: {}", msg_channel_reserve_satoshis, channel_value_satoshis)));
}
let full_channel_value_msat = (channel_value_satoshis - msg_channel_reserve_satoshis) * 1000;
if msg_push_msat > full_channel_value_msat {
- return Err(ChannelError::Close(format!("push_msat {} was larger than channel amount minus reserve ({})", msg_push_msat, full_channel_value_msat)));
+ return Err(ChannelError::close(format!("push_msat {} was larger than channel amount minus reserve ({})", msg_push_msat, full_channel_value_msat)));
}
if open_channel_fields.dust_limit_satoshis > channel_value_satoshis {
- return Err(ChannelError::Close(format!("dust_limit_satoshis {} was larger than channel_value_satoshis {}. Peer never wants payout outputs?", open_channel_fields.dust_limit_satoshis, channel_value_satoshis)));
+ return Err(ChannelError::close(format!("dust_limit_satoshis {} was larger than channel_value_satoshis {}. Peer never wants payout outputs?", open_channel_fields.dust_limit_satoshis, channel_value_satoshis)));
}
if open_channel_fields.htlc_minimum_msat >= full_channel_value_msat {
- return Err(ChannelError::Close(format!("Minimum htlc value ({}) was larger than full channel value ({})", open_channel_fields.htlc_minimum_msat, full_channel_value_msat)));
+ return Err(ChannelError::close(format!("Minimum htlc value ({}) was larger than full channel value ({})", open_channel_fields.htlc_minimum_msat, full_channel_value_msat)));
}
Channel::<SP>::check_remote_fee(&channel_type, fee_estimator, open_channel_fields.commitment_feerate_sat_per_1000_weight, None, &&logger)?;
let max_counterparty_selected_contest_delay = u16::min(config.channel_handshake_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
if open_channel_fields.to_self_delay > max_counterparty_selected_contest_delay {
- return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_counterparty_selected_contest_delay, open_channel_fields.to_self_delay)));
+ return Err(ChannelError::close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_counterparty_selected_contest_delay, open_channel_fields.to_self_delay)));
}
if open_channel_fields.max_accepted_htlcs < 1 {
- return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
+ return Err(ChannelError::close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
}
if open_channel_fields.max_accepted_htlcs > MAX_HTLCS {
- return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", open_channel_fields.max_accepted_htlcs, MAX_HTLCS)));
+ return Err(ChannelError::close(format!("max_accepted_htlcs was {}. It must not be larger than {}", open_channel_fields.max_accepted_htlcs, MAX_HTLCS)));
}
// Now check against optional parameters as set by config...
if channel_value_satoshis < config.channel_handshake_limits.min_funding_satoshis {
- return Err(ChannelError::Close(format!("Funding satoshis ({}) is less than the user specified limit ({})", channel_value_satoshis, config.channel_handshake_limits.min_funding_satoshis)));
+ return Err(ChannelError::close(format!("Funding satoshis ({}) is less than the user specified limit ({})", channel_value_satoshis, config.channel_handshake_limits.min_funding_satoshis)));
}
if open_channel_fields.htlc_minimum_msat > config.channel_handshake_limits.max_htlc_minimum_msat {
- return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", open_channel_fields.htlc_minimum_msat, config.channel_handshake_limits.max_htlc_minimum_msat)));
+ return Err(ChannelError::close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", open_channel_fields.htlc_minimum_msat, config.channel_handshake_limits.max_htlc_minimum_msat)));
}
if open_channel_fields.max_htlc_value_in_flight_msat < config.channel_handshake_limits.min_max_htlc_value_in_flight_msat {
- return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", open_channel_fields.max_htlc_value_in_flight_msat, config.channel_handshake_limits.min_max_htlc_value_in_flight_msat)));
+ return Err(ChannelError::close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", open_channel_fields.max_htlc_value_in_flight_msat, config.channel_handshake_limits.min_max_htlc_value_in_flight_msat)));
}
if msg_channel_reserve_satoshis > config.channel_handshake_limits.max_channel_reserve_satoshis {
- return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg_channel_reserve_satoshis, config.channel_handshake_limits.max_channel_reserve_satoshis)));
+ return Err(ChannelError::close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg_channel_reserve_satoshis, config.channel_handshake_limits.max_channel_reserve_satoshis)));
}
if open_channel_fields.max_accepted_htlcs < config.channel_handshake_limits.min_max_accepted_htlcs {
- return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", open_channel_fields.max_accepted_htlcs, config.channel_handshake_limits.min_max_accepted_htlcs)));
+ return Err(ChannelError::close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", open_channel_fields.max_accepted_htlcs, config.channel_handshake_limits.min_max_accepted_htlcs)));
}
if open_channel_fields.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
- return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", open_channel_fields.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
+ return Err(ChannelError::close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", open_channel_fields.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
}
if open_channel_fields.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
- return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", open_channel_fields.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
+ return Err(ChannelError::close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", open_channel_fields.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
}
// Convert things into internal flags and prep our state:
if config.channel_handshake_limits.force_announced_channel_preference {
if config.channel_handshake_config.announced_channel != announced_channel {
- return Err(ChannelError::Close("Peer tried to open channel but their announcement preference is different from ours".to_owned()));
+ return Err(ChannelError::close("Peer tried to open channel but their announcement preference is different from ours".to_owned()));
}
}
if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
// Protocol level safety check in place, although it should never happen because
// of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
- return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({}). dust_limit_satoshis is ({}).", holder_selected_channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
+ return Err(ChannelError::close(format!("Suitable channel reserve not found. remote_channel_reserve was ({}). dust_limit_satoshis is ({}).", holder_selected_channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
}
if holder_selected_channel_reserve_satoshis * 1000 >= full_channel_value_msat {
- return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({})msats. Channel value is ({} - {})msats.", holder_selected_channel_reserve_satoshis * 1000, full_channel_value_msat, msg_push_msat)));
+ return Err(ChannelError::close(format!("Suitable channel reserve not found. remote_channel_reserve was ({})msats. Channel value is ({} - {})msats.", holder_selected_channel_reserve_satoshis * 1000, full_channel_value_msat, msg_push_msat)));
}
if msg_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
log_debug!(logger, "channel_reserve_satoshis ({}) is smaller than our dust limit ({}). We can broadcast stale states without any risk, implying this channel is very insecure for our counterparty.",
msg_channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS);
}
if holder_selected_channel_reserve_satoshis < open_channel_fields.dust_limit_satoshis {
- return Err(ChannelError::Close(format!("Dust limit ({}) too high for the channel reserve we require the remote to keep ({})", open_channel_fields.dust_limit_satoshis, holder_selected_channel_reserve_satoshis)));
+ return Err(ChannelError::close(format!("Dust limit ({}) too high for the channel reserve we require the remote to keep ({})", open_channel_fields.dust_limit_satoshis, holder_selected_channel_reserve_satoshis)));
}
// check if the funder's amount for the initial commitment tx is sufficient
let funders_amount_msat = open_channel_fields.funding_satoshis * 1000 - msg_push_msat;
let commitment_tx_fee = commit_tx_fee_msat(open_channel_fields.commitment_feerate_sat_per_1000_weight, MIN_AFFORDABLE_HTLC_COUNT, &channel_type) / 1000;
if (funders_amount_msat / 1000).saturating_sub(anchor_outputs_value) < commitment_tx_fee {
- return Err(ChannelError::Close(format!("Funding amount ({} sats) can't even pay fee for initial commitment transaction fee of {} sats.", (funders_amount_msat / 1000).saturating_sub(anchor_outputs_value), commitment_tx_fee)));
+ return Err(ChannelError::close(format!("Funding amount ({} sats) can't even pay fee for initial commitment transaction fee of {} sats.", (funders_amount_msat / 1000).saturating_sub(anchor_outputs_value), commitment_tx_fee)));
}
let to_remote_satoshis = funders_amount_msat / 1000 - commitment_tx_fee - anchor_outputs_value;
// While it's reasonable for us to not meet the channel reserve initially (if they don't
// want to push much to us), our counterparty should always have more than our reserve.
if to_remote_satoshis < holder_selected_channel_reserve_satoshis {
- return Err(ChannelError::Close("Insufficient funding amount for initial reserve".to_owned()));
+ return Err(ChannelError::close("Insufficient funding amount for initial reserve".to_owned()));
}
let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
None
} else {
if !script::is_bolt2_compliant(&script, their_features) {
- return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)))
+ return Err(ChannelError::close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)))
}
Some(script.clone())
}
},
// Peer is signaling upfront shutdown but don't opt-out with correct mechanism (a.k.a 0-length script). Peer looks buggy, we fail the channel
&None => {
- return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out".to_owned()));
+ return Err(ChannelError::close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out".to_owned()));
}
}
} else { None };
let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
match signer_provider.get_shutdown_scriptpubkey() {
Ok(scriptpubkey) => Some(scriptpubkey),
- Err(_) => return Err(ChannelError::Close("Failed to get upfront shutdown scriptpubkey".to_owned())),
+ Err(_) => return Err(ChannelError::close("Failed to get upfront shutdown scriptpubkey".to_owned())),
}
} else { None };
if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
if !shutdown_scriptpubkey.is_compatible(&their_features) {
- return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
+ return Err(ChannelError::close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
}
}
let destination_script = match signer_provider.get_destination_script(channel_keys_id) {
Ok(script) => script,
- Err(_) => return Err(ChannelError::Close("Failed to get destination script".to_owned())),
+ Err(_) => return Err(ChannelError::close("Failed to get destination script".to_owned())),
};
let mut secp_ctx = Secp256k1::new();
let value_to_self_msat = our_funding_satoshis * 1000 + msg_push_msat;
+ let holder_signer = ChannelSignerType::Ecdsa(holder_signer);
+ let holder_commitment_point = HolderCommitmentPoint::new(&holder_signer, &secp_ctx);
+
// TODO(dual_funding): Checks for `funding_feerate_sat_per_1000_weight`?
let channel_context = ChannelContext {
latest_monitor_update_id: 0,
- holder_signer: ChannelSignerType::Ecdsa(holder_signer),
+ holder_signer,
shutdown_scriptpubkey,
destination_script,
- cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
+ holder_commitment_point,
cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
value_to_self_msat,
let temporary_channel_id = temporary_channel_id.unwrap_or_else(|| ChannelId::temporary_from_entropy_source(entropy_source));
+ let holder_signer = ChannelSignerType::Ecdsa(holder_signer);
+ let holder_commitment_point = HolderCommitmentPoint::new(&holder_signer, &secp_ctx);
+
Ok(Self {
user_id,
latest_monitor_update_id: 0,
- holder_signer: ChannelSignerType::Ecdsa(holder_signer),
+ holder_signer,
shutdown_scriptpubkey,
destination_script,
- cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
+ holder_commitment_point,
cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
value_to_self_msat,
/// our counterparty!)
/// The result is a transaction which we can revoke broadcastership of (ie a "local" transaction)
/// TODO Some magic rust shit to compile-time check this?
- fn build_holder_transaction_keys(&self, commitment_number: u64) -> TxCreationKeys {
- let per_commitment_point = self.holder_signer.as_ref().get_per_commitment_point(commitment_number, &self.secp_ctx);
+ fn build_holder_transaction_keys(&self) -> TxCreationKeys {
+ let per_commitment_point = self.holder_commitment_point.current_point();
let delayed_payment_base = &self.get_holder_pubkeys().delayed_payment_basepoint;
let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint;
let counterparty_pubkeys = self.get_counterparty_pubkeys();
return Ok(());
}
}
- return Err(ChannelError::Close(format!("Peer's feerate much too low. Actual: {}. Our expected lower limit: {}", feerate_per_kw, lower_limit)));
+ return Err(ChannelError::Close((format!(
+ "Peer's feerate much too low. Actual: {}. Our expected lower limit: {}", feerate_per_kw, lower_limit
+ ), ClosureReason::PeerFeerateTooLow {
+ peer_feerate_sat_per_kw: feerate_per_kw,
+ required_feerate_sat_per_kw: lower_limit,
+ })));
}
Ok(())
}
}
// If we reconnected before sending our `channel_ready` they may still resend theirs.
ChannelState::ChannelReady(_) => check_reconnection = true,
- _ => return Err(ChannelError::Close("Peer sent a channel_ready at a strange time".to_owned())),
+ _ => return Err(ChannelError::close("Peer sent a channel_ready at a strange time".to_owned())),
}
if check_reconnection {
// They probably disconnected/reconnected and re-sent the channel_ready, which is
).expect("We already advanced, so previous secret keys should have been validated already")))
};
if expected_point != Some(msg.next_per_commitment_point) {
- return Err(ChannelError::Close("Peer sent a reconnect channel_ready with a different point".to_owned()));
+ return Err(ChannelError::close("Peer sent a reconnect channel_ready with a different point".to_owned()));
}
return Ok(None);
}
fee_estimator: &LowerBoundedFeeEstimator<F>,
) -> Result<(), ChannelError> where F::Target: FeeEstimator {
if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
- return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state".to_owned()));
+ return Err(ChannelError::close("Got add HTLC message when channel was not in an operational state".to_owned()));
}
// If the remote has sent a shutdown prior to adding this HTLC, then they are in violation of the spec.
if self.context.channel_state.is_remote_shutdown_sent() {
- return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state".to_owned()));
+ return Err(ChannelError::close("Got add HTLC message when channel was not in an operational state".to_owned()));
}
if self.context.channel_state.is_peer_disconnected() {
- return Err(ChannelError::Close("Peer sent update_add_htlc when we needed a channel_reestablish".to_owned()));
+ return Err(ChannelError::close("Peer sent update_add_htlc when we needed a channel_reestablish".to_owned()));
}
if msg.amount_msat > self.context.channel_value_satoshis * 1000 {
- return Err(ChannelError::Close("Remote side tried to send more than the total value of the channel".to_owned()));
+ return Err(ChannelError::close("Remote side tried to send more than the total value of the channel".to_owned()));
}
if msg.amount_msat == 0 {
- return Err(ChannelError::Close("Remote side tried to send a 0-msat HTLC".to_owned()));
+ return Err(ChannelError::close("Remote side tried to send a 0-msat HTLC".to_owned()));
}
if msg.amount_msat < self.context.holder_htlc_minimum_msat {
- return Err(ChannelError::Close(format!("Remote side tried to send less than our minimum HTLC value. Lower limit: ({}). Actual: ({})", self.context.holder_htlc_minimum_msat, msg.amount_msat)));
+ return Err(ChannelError::close(format!("Remote side tried to send less than our minimum HTLC value. Lower limit: ({}). Actual: ({})", self.context.holder_htlc_minimum_msat, msg.amount_msat)));
}
let dust_exposure_limiting_feerate = self.context.get_dust_exposure_limiting_feerate(&fee_estimator);
let htlc_stats = self.context.get_pending_htlc_stats(None, dust_exposure_limiting_feerate);
if htlc_stats.pending_inbound_htlcs + 1 > self.context.holder_max_accepted_htlcs as usize {
- return Err(ChannelError::Close(format!("Remote tried to push more than our max accepted HTLCs ({})", self.context.holder_max_accepted_htlcs)));
+ return Err(ChannelError::close(format!("Remote tried to push more than our max accepted HTLCs ({})", self.context.holder_max_accepted_htlcs)));
}
if htlc_stats.pending_inbound_htlcs_value_msat + msg.amount_msat > self.context.holder_max_htlc_value_in_flight_msat {
- return Err(ChannelError::Close(format!("Remote HTLC add would put them over our max HTLC value ({})", self.context.holder_max_htlc_value_in_flight_msat)));
+ return Err(ChannelError::close(format!("Remote HTLC add would put them over our max HTLC value ({})", self.context.holder_max_htlc_value_in_flight_msat)));
}
// Check holder_selected_channel_reserve_satoshis (we're getting paid, so they have to at least meet
let pending_remote_value_msat =
self.context.channel_value_satoshis * 1000 - pending_value_to_self_msat;
if pending_remote_value_msat < msg.amount_msat {
- return Err(ChannelError::Close("Remote HTLC add would overdraw remaining funds".to_owned()));
+ return Err(ChannelError::close("Remote HTLC add would overdraw remaining funds".to_owned()));
}
// Check that the remote can afford to pay for this HTLC on-chain at the current
0
};
if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(anchor_outputs_value_msat) < remote_commit_tx_fee_msat {
- return Err(ChannelError::Close("Remote HTLC add would not leave enough to pay for fees".to_owned()));
+ return Err(ChannelError::close("Remote HTLC add would not leave enough to pay for fees".to_owned()));
};
if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(remote_commit_tx_fee_msat).saturating_sub(anchor_outputs_value_msat) < self.context.holder_selected_channel_reserve_satoshis * 1000 {
- return Err(ChannelError::Close("Remote HTLC add would put them under remote reserve value".to_owned()));
+ return Err(ChannelError::close("Remote HTLC add would put them under remote reserve value".to_owned()));
}
}
let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
let local_commit_tx_fee_msat = self.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
if self.context.value_to_self_msat < self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 + local_commit_tx_fee_msat + anchor_outputs_value_msat {
- return Err(ChannelError::Close("Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_owned()));
+ return Err(ChannelError::close("Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_owned()));
}
}
if self.context.next_counterparty_htlc_id != msg.htlc_id {
- return Err(ChannelError::Close(format!("Remote skipped HTLC ID (skipped ID: {})", self.context.next_counterparty_htlc_id)));
+ return Err(ChannelError::close(format!("Remote skipped HTLC ID (skipped ID: {})", self.context.next_counterparty_htlc_id)));
}
if msg.cltv_expiry >= 500000000 {
- return Err(ChannelError::Close("Remote provided CLTV expiry in seconds instead of block height".to_owned()));
+ return Err(ChannelError::close("Remote provided CLTV expiry in seconds instead of block height".to_owned()));
}
if self.context.channel_state.is_local_shutdown_sent() {
Some(payment_preimage) => {
let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0[..]).to_byte_array());
if payment_hash != htlc.payment_hash {
- return Err(ChannelError::Close(format!("Remote tried to fulfill HTLC ({}) with an incorrect preimage", htlc_id)));
+ return Err(ChannelError::close(format!("Remote tried to fulfill HTLC ({}) with an incorrect preimage", htlc_id)));
}
OutboundHTLCOutcome::Success(Some(payment_preimage))
}
};
match htlc.state {
OutboundHTLCState::LocalAnnounced(_) =>
- return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) before it had been committed", htlc_id))),
+ return Err(ChannelError::close(format!("Remote tried to fulfill/fail HTLC ({}) before it had been committed", htlc_id))),
OutboundHTLCState::Committed => {
htlc.state = OutboundHTLCState::RemoteRemoved(outcome);
},
OutboundHTLCState::AwaitingRemoteRevokeToRemove(_) | OutboundHTLCState::AwaitingRemovedRemoteRevoke(_) | OutboundHTLCState::RemoteRemoved(_) =>
- return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) that they'd already fulfilled/failed", htlc_id))),
+ return Err(ChannelError::close(format!("Remote tried to fulfill/fail HTLC ({}) that they'd already fulfilled/failed", htlc_id))),
}
return Ok(htlc);
}
}
- Err(ChannelError::Close("Remote tried to fulfill/fail an HTLC we couldn't find".to_owned()))
+ Err(ChannelError::close("Remote tried to fulfill/fail an HTLC we couldn't find".to_owned()))
}
pub fn update_fulfill_htlc(&mut self, msg: &msgs::UpdateFulfillHTLC) -> Result<(HTLCSource, u64, Option<u64>), ChannelError> {
if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
- return Err(ChannelError::Close("Got fulfill HTLC message when channel was not in an operational state".to_owned()));
+ return Err(ChannelError::close("Got fulfill HTLC message when channel was not in an operational state".to_owned()));
}
if self.context.channel_state.is_peer_disconnected() {
- return Err(ChannelError::Close("Peer sent update_fulfill_htlc when we needed a channel_reestablish".to_owned()));
+ return Err(ChannelError::close("Peer sent update_fulfill_htlc when we needed a channel_reestablish".to_owned()));
}
self.mark_outbound_htlc_removed(msg.htlc_id, Some(msg.payment_preimage), None).map(|htlc| (htlc.source.clone(), htlc.amount_msat, htlc.skimmed_fee_msat))
pub fn update_fail_htlc(&mut self, msg: &msgs::UpdateFailHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
- return Err(ChannelError::Close("Got fail HTLC message when channel was not in an operational state".to_owned()));
+ return Err(ChannelError::close("Got fail HTLC message when channel was not in an operational state".to_owned()));
}
if self.context.channel_state.is_peer_disconnected() {
- return Err(ChannelError::Close("Peer sent update_fail_htlc when we needed a channel_reestablish".to_owned()));
+ return Err(ChannelError::close("Peer sent update_fail_htlc when we needed a channel_reestablish".to_owned()));
}
self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?;
pub fn update_fail_malformed_htlc(&mut self, msg: &msgs::UpdateFailMalformedHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
- return Err(ChannelError::Close("Got fail malformed HTLC message when channel was not in an operational state".to_owned()));
+ return Err(ChannelError::close("Got fail malformed HTLC message when channel was not in an operational state".to_owned()));
}
if self.context.channel_state.is_peer_disconnected() {
- return Err(ChannelError::Close("Peer sent update_fail_malformed_htlc when we needed a channel_reestablish".to_owned()));
+ return Err(ChannelError::close("Peer sent update_fail_malformed_htlc when we needed a channel_reestablish".to_owned()));
}
self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?;
where L::Target: Logger
{
if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
- return Err(ChannelError::Close("Got commitment signed message when channel was not in an operational state".to_owned()));
+ return Err(ChannelError::close("Got commitment signed message when channel was not in an operational state".to_owned()));
}
if self.context.channel_state.is_peer_disconnected() {
- return Err(ChannelError::Close("Peer sent commitment_signed when we needed a channel_reestablish".to_owned()));
+ return Err(ChannelError::close("Peer sent commitment_signed when we needed a channel_reestablish".to_owned()));
}
if self.context.channel_state.is_both_sides_shutdown() && self.context.last_sent_closing_fee.is_some() {
- return Err(ChannelError::Close("Peer sent commitment_signed after we'd started exchanging closing_signeds".to_owned()));
+ return Err(ChannelError::close("Peer sent commitment_signed after we'd started exchanging closing_signeds".to_owned()));
}
let funding_script = self.context.get_funding_redeemscript();
- let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
+ let keys = self.context.build_holder_transaction_keys();
- let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger);
+ let commitment_stats = self.context.build_commitment_transaction(self.context.holder_commitment_point.transaction_number(), &keys, true, false, logger);
let commitment_txid = {
let trusted_tx = commitment_stats.tx.trust();
let bitcoin_tx = trusted_tx.built_transaction();
log_bytes!(self.context.counterparty_funding_pubkey().serialize()), encode::serialize_hex(&bitcoin_tx.transaction),
log_bytes!(sighash[..]), encode::serialize_hex(&funding_script), &self.context.channel_id());
if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.counterparty_funding_pubkey()) {
- return Err(ChannelError::Close("Invalid commitment tx signature from peer".to_owned()));
+ return Err(ChannelError::close("Invalid commitment tx signature from peer".to_owned()));
}
bitcoin_tx.txid
};
debug_assert!(!self.context.is_outbound());
let counterparty_reserve_we_require_msat = self.context.holder_selected_channel_reserve_satoshis * 1000;
if commitment_stats.remote_balance_msat < commitment_stats.total_fee_sat * 1000 + counterparty_reserve_we_require_msat {
- return Err(ChannelError::Close("Funding remote cannot afford proposed new fee".to_owned()));
+ return Err(ChannelError::close("Funding remote cannot afford proposed new fee".to_owned()));
}
}
#[cfg(any(test, fuzzing))]
}
if msg.htlc_signatures.len() != commitment_stats.num_nondust_htlcs {
- return Err(ChannelError::Close(format!("Got wrong number of HTLC signatures ({}) from remote. It must be {}", msg.htlc_signatures.len(), commitment_stats.num_nondust_htlcs)));
+ return Err(ChannelError::close(format!("Got wrong number of HTLC signatures ({}) from remote. It must be {}", msg.htlc_signatures.len(), commitment_stats.num_nondust_htlcs)));
}
// Up to LDK 0.0.115, HTLC information was required to be duplicated in the
log_bytes!(msg.htlc_signatures[idx].serialize_compact()[..]), log_bytes!(keys.countersignatory_htlc_key.to_public_key().serialize()),
encode::serialize_hex(&htlc_tx), log_bytes!(htlc_sighash[..]), encode::serialize_hex(&htlc_redeemscript), &self.context.channel_id());
if let Err(_) = self.context.secp_ctx.verify_ecdsa(&htlc_sighash, &msg.htlc_signatures[idx], &keys.countersignatory_htlc_key.to_public_key()) {
- return Err(ChannelError::Close("Invalid HTLC tx signature from peer".to_owned()));
+ return Err(ChannelError::close("Invalid HTLC tx signature from peer".to_owned()));
}
if !separate_nondust_htlc_sources {
htlcs_and_sigs.push((htlc, Some(msg.htlc_signatures[idx]), source_opt.take()));
);
self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, commitment_stats.outbound_htlc_preimages)
- .map_err(|_| ChannelError::Close("Failed to validate our commitment".to_owned()))?;
+ .map_err(|_| ChannelError::close("Failed to validate our commitment".to_owned()))?;
// Update state now that we've passed all the can-fail calls...
let mut need_commitment = false;
channel_id: Some(self.context.channel_id()),
};
- self.context.cur_holder_commitment_transaction_number -= 1;
+ self.context.holder_commitment_point.advance(&self.context.holder_signer, &self.context.secp_ctx, logger);
self.context.expecting_peer_commitment_signed = false;
// Note that if we need_commitment & !AwaitingRemoteRevoke we'll call
// build_commitment_no_status_check() next which will reset this to RAAFirst.
where F::Target: FeeEstimator, L::Target: Logger,
{
if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
- return Err(ChannelError::Close("Got revoke/ACK message when channel was not in an operational state".to_owned()));
+ return Err(ChannelError::close("Got revoke/ACK message when channel was not in an operational state".to_owned()));
}
if self.context.channel_state.is_peer_disconnected() {
- return Err(ChannelError::Close("Peer sent revoke_and_ack when we needed a channel_reestablish".to_owned()));
+ return Err(ChannelError::close("Peer sent revoke_and_ack when we needed a channel_reestablish".to_owned()));
}
if self.context.channel_state.is_both_sides_shutdown() && self.context.last_sent_closing_fee.is_some() {
- return Err(ChannelError::Close("Peer sent revoke_and_ack after we'd started exchanging closing_signeds".to_owned()));
+ return Err(ChannelError::close("Peer sent revoke_and_ack after we'd started exchanging closing_signeds".to_owned()));
}
let secret = secp_check!(SecretKey::from_slice(&msg.per_commitment_secret), "Peer provided an invalid per_commitment_secret".to_owned());
if let Some(counterparty_prev_commitment_point) = self.context.counterparty_prev_commitment_point {
if PublicKey::from_secret_key(&self.context.secp_ctx, &secret) != counterparty_prev_commitment_point {
- return Err(ChannelError::Close("Got a revoke commitment secret which didn't correspond to their current pubkey".to_owned()));
+ return Err(ChannelError::close("Got a revoke commitment secret which didn't correspond to their current pubkey".to_owned()));
}
}
// lot of work, and there's some chance this is all a misunderstanding anyway.
// We have to do *something*, though, since our signer may get mad at us for otherwise
// jumping a remote commitment number, so best to just force-close and move on.
- return Err(ChannelError::Close("Received an unexpected revoke_and_ack".to_owned()));
+ return Err(ChannelError::close("Received an unexpected revoke_and_ack".to_owned()));
}
#[cfg(any(test, fuzzing))]
ecdsa.validate_counterparty_revocation(
self.context.cur_counterparty_commitment_transaction_number + 1,
&secret
- ).map_err(|_| ChannelError::Close("Failed to validate revocation from peer".to_owned()))?;
+ ).map_err(|_| ChannelError::close("Failed to validate revocation from peer".to_owned()))?;
},
// TODO (taproot|arik)
#[cfg(taproot)]
};
self.context.commitment_secrets.provide_secret(self.context.cur_counterparty_commitment_transaction_number + 1, msg.per_commitment_secret)
- .map_err(|_| ChannelError::Close("Previous secrets did not match new one".to_owned()))?;
+ .map_err(|_| ChannelError::close("Previous secrets did not match new one".to_owned()))?;
self.context.latest_monitor_update_id += 1;
let mut monitor_update = ChannelMonitorUpdate {
update_id: self.context.latest_monitor_update_id,
// Before proposing a feerate update, check that we can actually afford the new fee.
let dust_exposure_limiting_feerate = self.context.get_dust_exposure_limiting_feerate(&fee_estimator);
let htlc_stats = self.context.get_pending_htlc_stats(Some(feerate_per_kw), dust_exposure_limiting_feerate);
- let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
- let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, true, logger);
+ let keys = self.context.build_holder_transaction_keys();
+ let commitment_stats = self.context.build_commitment_transaction(self.context.holder_commitment_point.transaction_number(), &keys, true, true, logger);
let buffer_fee_msat = commit_tx_fee_sat(feerate_per_kw, commitment_stats.num_nondust_htlcs + htlc_stats.on_holder_tx_outbound_holding_cell_htlcs_count as usize + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as usize, self.context.get_channel_type()) * 1000;
let holder_balance_msat = commitment_stats.local_balance_msat - htlc_stats.outbound_holding_cell_msat;
if holder_balance_msat < buffer_fee_msat + self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 {
assert!(!self.context.is_outbound() || self.context.minimum_depth == Some(0),
"Funding transaction broadcast by the local client before it should have - LDK didn't do it!");
self.context.monitor_pending_channel_ready = false;
- let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
- Some(msgs::ChannelReady {
- channel_id: self.context.channel_id(),
- next_per_commitment_point,
- short_channel_id_alias: Some(self.context.outbound_scid_alias),
- })
+ Some(self.get_channel_ready())
} else { None };
let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block_height, logger);
}
}
+ pub fn check_for_stale_feerate<L: Logger>(&mut self, logger: &L, min_feerate: u32) -> Result<(), ClosureReason> {
+ if self.context.is_outbound() {
+ // While it's possible our fee is too low for an outbound channel because we've been
+ // unable to increase the fee, we don't try to force-close directly here.
+ return Ok(());
+ }
+ if self.context.feerate_per_kw < min_feerate {
+ log_info!(logger,
+ "Closing channel as feerate of {} is below required {} (the minimum required rate over the past day)",
+ self.context.feerate_per_kw, min_feerate
+ );
+ Err(ClosureReason::PeerFeerateTooLow {
+ peer_feerate_sat_per_kw: self.context.feerate_per_kw,
+ required_feerate_sat_per_kw: min_feerate,
+ })
+ } else {
+ Ok(())
+ }
+ }
+
pub fn update_fee<F: Deref, L: Deref>(&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, msg: &msgs::UpdateFee, logger: &L) -> Result<(), ChannelError>
where F::Target: FeeEstimator, L::Target: Logger
{
if self.context.is_outbound() {
- return Err(ChannelError::Close("Non-funding remote tried to update channel fee".to_owned()));
+ return Err(ChannelError::close("Non-funding remote tried to update channel fee".to_owned()));
}
if self.context.channel_state.is_peer_disconnected() {
- return Err(ChannelError::Close("Peer sent update_fee when we needed a channel_reestablish".to_owned()));
+ return Err(ChannelError::close("Peer sent update_fee when we needed a channel_reestablish".to_owned()));
}
Channel::<SP>::check_remote_fee(&self.context.channel_type, fee_estimator, msg.feerate_per_kw, Some(self.context.feerate_per_kw), logger)?;
let htlc_stats = self.context.get_pending_htlc_stats(None, dust_exposure_limiting_feerate);
let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(dust_exposure_limiting_feerate);
if htlc_stats.on_holder_tx_dust_exposure_msat > max_dust_htlc_exposure_msat {
- return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our own transactions (totaling {} msat)",
+ return Err(ChannelError::close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our own transactions (totaling {} msat)",
msg.feerate_per_kw, htlc_stats.on_holder_tx_dust_exposure_msat)));
}
if htlc_stats.on_counterparty_tx_dust_exposure_msat > max_dust_htlc_exposure_msat {
- return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our counterparty's transactions (totaling {} msat)",
+ return Err(ChannelError::close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our counterparty's transactions (totaling {} msat)",
msg.feerate_per_kw, htlc_stats.on_counterparty_tx_dust_exposure_msat)));
}
Ok(())
self.context.get_funding_signed_msg(logger).1
} else { None };
let channel_ready = if funding_signed.is_some() {
- self.check_get_channel_ready(0)
+ self.check_get_channel_ready(0, logger)
} else { None };
log_trace!(logger, "Signer unblocked with {} commitment_update, {} funding_signed and {} channel_ready",
}
fn get_last_revoke_and_ack(&self) -> msgs::RevokeAndACK {
- let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
- let per_commitment_secret = self.context.holder_signer.as_ref().release_commitment_secret(self.context.cur_holder_commitment_transaction_number + 2);
+ debug_assert!(self.context.holder_commitment_point.transaction_number() <= INITIAL_COMMITMENT_NUMBER + 2);
+ // TODO: handle non-available case when get_per_commitment_point becomes async
+ debug_assert!(self.context.holder_commitment_point.is_available());
+ let next_per_commitment_point = self.context.holder_commitment_point.current_point();
+ let per_commitment_secret = self.context.holder_signer.as_ref().release_commitment_secret(self.context.holder_commitment_point.transaction_number() + 2);
msgs::RevokeAndACK {
channel_id: self.context.channel_id,
per_commitment_secret,
// While BOLT 2 doesn't indicate explicitly we should error this channel here, it
// almost certainly indicates we are going to end up out-of-sync in some way, so we
// just close here instead of trying to recover.
- return Err(ChannelError::Close("Peer sent a loose channel_reestablish not after reconnect".to_owned()));
+ return Err(ChannelError::close("Peer sent a loose channel_reestablish not after reconnect".to_owned()));
}
if msg.next_local_commitment_number >= INITIAL_COMMITMENT_NUMBER || msg.next_remote_commitment_number >= INITIAL_COMMITMENT_NUMBER ||
msg.next_local_commitment_number == 0 {
- return Err(ChannelError::Close("Peer sent an invalid channel_reestablish to force close in a non-standard way".to_owned()));
+ return Err(ChannelError::close("Peer sent an invalid channel_reestablish to force close in a non-standard way".to_owned()));
}
- let our_commitment_transaction = INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number - 1;
+ let our_commitment_transaction = INITIAL_COMMITMENT_NUMBER - self.context.holder_commitment_point.transaction_number() - 1;
if msg.next_remote_commitment_number > 0 {
let expected_point = self.context.holder_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - msg.next_remote_commitment_number + 1, &self.context.secp_ctx);
let given_secret = SecretKey::from_slice(&msg.your_last_per_commitment_secret)
- .map_err(|_| ChannelError::Close("Peer sent a garbage channel_reestablish with unparseable secret key".to_owned()))?;
+ .map_err(|_| ChannelError::close("Peer sent a garbage channel_reestablish with unparseable secret key".to_owned()))?;
if expected_point != PublicKey::from_secret_key(&self.context.secp_ctx, &given_secret) {
- return Err(ChannelError::Close("Peer sent a garbage channel_reestablish with secret key not matching the commitment height provided".to_owned()));
+ return Err(ChannelError::close("Peer sent a garbage channel_reestablish with secret key not matching the commitment height provided".to_owned()));
}
if msg.next_remote_commitment_number > our_commitment_transaction {
macro_rules! log_and_panic {
if !self.context.channel_state.is_our_channel_ready() ||
self.context.channel_state.is_monitor_update_in_progress() {
if msg.next_remote_commitment_number != 0 {
- return Err(ChannelError::Close("Peer claimed they saw a revoke_and_ack but we haven't sent channel_ready yet".to_owned()));
+ return Err(ChannelError::close("Peer claimed they saw a revoke_and_ack but we haven't sent channel_ready yet".to_owned()));
}
// Short circuit the whole handler as there is nothing we can resend them
return Ok(ReestablishResponses {
}
// We have OurChannelReady set!
- let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
return Ok(ReestablishResponses {
- channel_ready: Some(msgs::ChannelReady {
- channel_id: self.context.channel_id(),
- next_per_commitment_point,
- short_channel_id_alias: Some(self.context.outbound_scid_alias),
- }),
+ channel_ready: Some(self.get_channel_ready()),
raa: None, commitment_update: None,
order: RAACommitmentOrder::CommitmentFirst,
shutdown_msg, announcement_sigs,
}
} else {
debug_assert!(false, "All values should have been handled in the four cases above");
- return Err(ChannelError::Close(format!(
+ return Err(ChannelError::close(format!(
"Peer attempted to reestablish channel expecting a future local commitment transaction: {} (received) vs {} (expected)",
msg.next_remote_commitment_number,
our_commitment_transaction
}
let next_counterparty_commitment_number = INITIAL_COMMITMENT_NUMBER - self.context.cur_counterparty_commitment_transaction_number + if is_awaiting_remote_revoke { 1 } else { 0 };
- let channel_ready = if msg.next_local_commitment_number == 1 && INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number == 1 {
+ let channel_ready = if msg.next_local_commitment_number == 1 && INITIAL_COMMITMENT_NUMBER - self.context.holder_commitment_point.transaction_number() == 1 {
// We should never have to worry about MonitorUpdateInProgress resending ChannelReady
- let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
- Some(msgs::ChannelReady {
- channel_id: self.context.channel_id(),
- next_per_commitment_point,
- short_channel_id_alias: Some(self.context.outbound_scid_alias),
- })
+ Some(self.get_channel_ready())
} else { None };
if msg.next_local_commitment_number == next_counterparty_commitment_number {
})
}
} else if msg.next_local_commitment_number < next_counterparty_commitment_number {
- Err(ChannelError::Close(format!(
+ Err(ChannelError::close(format!(
"Peer attempted to reestablish channel with a very old remote commitment transaction: {} (received) vs {} (expected)",
msg.next_local_commitment_number,
next_counterparty_commitment_number,
)))
} else {
- Err(ChannelError::Close(format!(
+ Err(ChannelError::close(format!(
"Peer attempted to reestablish channel with a future remote commitment transaction: {} (received) vs {} (expected)",
msg.next_local_commitment_number,
next_counterparty_commitment_number,
pub fn timer_check_closing_negotiation_progress(&mut self) -> Result<(), ChannelError> {
if self.closing_negotiation_ready() {
if self.context.closing_signed_in_flight {
- return Err(ChannelError::Close("closing_signed negotiation failed to finish within two timer ticks".to_owned()));
+ return Err(ChannelError::close("closing_signed negotiation failed to finish within two timer ticks".to_owned()));
} else {
self.context.closing_signed_in_flight = true;
}
ChannelSignerType::Ecdsa(ecdsa) => {
let sig = ecdsa
.sign_closing_transaction(&closing_tx, &self.context.secp_ctx)
- .map_err(|()| ChannelError::Close("Failed to get signature for closing transaction.".to_owned()))?;
+ .map_err(|()| ChannelError::close("Failed to get signature for closing transaction.".to_owned()))?;
self.context.last_sent_closing_fee = Some((total_fee_satoshis, sig.clone()));
Ok((Some(msgs::ClosingSigned {
) -> Result<(Option<msgs::Shutdown>, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), ChannelError>
{
if self.context.channel_state.is_peer_disconnected() {
- return Err(ChannelError::Close("Peer sent shutdown when we needed a channel_reestablish".to_owned()));
+ return Err(ChannelError::close("Peer sent shutdown when we needed a channel_reestablish".to_owned()));
}
if self.context.channel_state.is_pre_funded_state() {
// Spec says we should fail the connection, not the channel, but that's nonsense, there
// are plenty of reasons you may want to fail a channel pre-funding, and spec says you
// can do that via error message without getting a connection fail anyway...
- return Err(ChannelError::Close("Peer sent shutdown pre-funding generation".to_owned()));
+ return Err(ChannelError::close("Peer sent shutdown pre-funding generation".to_owned()));
}
for htlc in self.context.pending_inbound_htlcs.iter() {
if let InboundHTLCState::RemoteAnnounced(_) = htlc.state {
- return Err(ChannelError::Close("Got shutdown with remote pending HTLCs".to_owned()));
+ return Err(ChannelError::close("Got shutdown with remote pending HTLCs".to_owned()));
}
}
assert!(!matches!(self.context.channel_state, ChannelState::ShutdownComplete));
assert!(send_shutdown);
let shutdown_scriptpubkey = match signer_provider.get_shutdown_scriptpubkey() {
Ok(scriptpubkey) => scriptpubkey,
- Err(_) => return Err(ChannelError::Close("Failed to get shutdown scriptpubkey".to_owned())),
+ Err(_) => return Err(ChannelError::close("Failed to get shutdown scriptpubkey".to_owned())),
};
if !shutdown_scriptpubkey.is_compatible(their_features) {
- return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
+ return Err(ChannelError::close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
}
self.context.shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
true
where F::Target: FeeEstimator
{
if !self.context.channel_state.is_both_sides_shutdown() {
- return Err(ChannelError::Close("Remote end sent us a closing_signed before both sides provided a shutdown".to_owned()));
+ return Err(ChannelError::close("Remote end sent us a closing_signed before both sides provided a shutdown".to_owned()));
}
if self.context.channel_state.is_peer_disconnected() {
- return Err(ChannelError::Close("Peer sent closing_signed when we needed a channel_reestablish".to_owned()));
+ return Err(ChannelError::close("Peer sent closing_signed when we needed a channel_reestablish".to_owned()));
}
if !self.context.pending_inbound_htlcs.is_empty() || !self.context.pending_outbound_htlcs.is_empty() {
- return Err(ChannelError::Close("Remote end sent us a closing_signed while there were still pending HTLCs".to_owned()));
+ return Err(ChannelError::close("Remote end sent us a closing_signed while there were still pending HTLCs".to_owned()));
}
if msg.fee_satoshis > TOTAL_BITCOIN_SUPPLY_SATOSHIS { // this is required to stop potential overflow in build_closing_transaction
- return Err(ChannelError::Close("Remote tried to send us a closing tx with > 21 million BTC fee".to_owned()));
+ return Err(ChannelError::close("Remote tried to send us a closing tx with > 21 million BTC fee".to_owned()));
}
if self.context.is_outbound() && self.context.last_sent_closing_fee.is_none() {
- return Err(ChannelError::Close("Remote tried to send a closing_signed when we were supposed to propose the first one".to_owned()));
+ return Err(ChannelError::close("Remote tried to send a closing_signed when we were supposed to propose the first one".to_owned()));
}
if self.context.channel_state.is_monitor_update_in_progress() {
let funding_redeemscript = self.context.get_funding_redeemscript();
let (mut closing_tx, used_total_fee) = self.build_closing_transaction(msg.fee_satoshis, false);
if used_total_fee != msg.fee_satoshis {
- return Err(ChannelError::Close(format!("Remote sent us a closing_signed with a fee other than the value they can claim. Fee in message: {}. Actual closing tx fee: {}", msg.fee_satoshis, used_total_fee)));
+ return Err(ChannelError::close(format!("Remote sent us a closing_signed with a fee other than the value they can claim. Fee in message: {}. Actual closing tx fee: {}", msg.fee_satoshis, used_total_fee)));
}
let sighash = closing_tx.trust().get_sighash_all(&funding_redeemscript, self.context.channel_value_satoshis);
for outp in closing_tx.trust().built_transaction().output.iter() {
if !outp.script_pubkey.is_witness_program() && outp.value < Amount::from_sat(MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS) {
- return Err(ChannelError::Close("Remote sent us a closing_signed with a dust output. Always use segwit closing scripts!".to_owned()));
+ return Err(ChannelError::close("Remote sent us a closing_signed with a dust output. Always use segwit closing scripts!".to_owned()));
}
}
ChannelSignerType::Ecdsa(ecdsa) => {
let sig = ecdsa
.sign_closing_transaction(&closing_tx, &self.context.secp_ctx)
- .map_err(|_| ChannelError::Close("External signer refused to sign closing transaction".to_owned()))?;
+ .map_err(|_| ChannelError::close("External signer refused to sign closing transaction".to_owned()))?;
let (signed_tx, shutdown_result) = if $new_fee == msg.fee_satoshis {
let shutdown_result = ShutdownResult {
closure_reason,
if let Some(msgs::ClosingSignedFeeRange { min_fee_satoshis, max_fee_satoshis }) = msg.fee_range {
if msg.fee_satoshis < min_fee_satoshis || msg.fee_satoshis > max_fee_satoshis {
- return Err(ChannelError::Close(format!("Peer sent a bogus closing_signed - suggested fee of {} sat was not in their desired range of {} sat - {} sat", msg.fee_satoshis, min_fee_satoshis, max_fee_satoshis)));
+ return Err(ChannelError::close(format!("Peer sent a bogus closing_signed - suggested fee of {} sat was not in their desired range of {} sat - {} sat", msg.fee_satoshis, min_fee_satoshis, max_fee_satoshis)));
}
if max_fee_satoshis < our_min_fee {
return Err(ChannelError::Warn(format!("Unable to come to consensus about closing feerate, remote's max fee ({} sat) was smaller than our min fee ({} sat)", max_fee_satoshis, our_min_fee)));
propose_fee!(cmp::min(max_fee_satoshis, our_max_fee));
} else {
if msg.fee_satoshis < our_min_fee || msg.fee_satoshis > our_max_fee {
- return Err(ChannelError::Close(format!("Peer sent a bogus closing_signed - suggested fee of {} sat was not in our desired range of {} sat - {} sat after we informed them of our range.",
+ return Err(ChannelError::close(format!("Peer sent a bogus closing_signed - suggested fee of {} sat was not in our desired range of {} sat - {} sat after we informed them of our range.",
msg.fee_satoshis, our_min_fee, our_max_fee)));
}
// The proposed fee is in our acceptable range, accept it and broadcast!
} else if last_fee < our_max_fee {
propose_fee!(our_max_fee);
} else {
- return Err(ChannelError::Close(format!("Unable to come to consensus about closing feerate, remote wants something ({} sat) higher than our max fee ({} sat)", msg.fee_satoshis, our_max_fee)));
+ return Err(ChannelError::close(format!("Unable to come to consensus about closing feerate, remote wants something ({} sat) higher than our max fee ({} sat)", msg.fee_satoshis, our_max_fee)));
}
} else {
if msg.fee_satoshis > our_min_fee {
} else if last_fee > our_min_fee {
propose_fee!(our_min_fee);
} else {
- return Err(ChannelError::Close(format!("Unable to come to consensus about closing feerate, remote wants something ({} sat) lower than our min fee ({} sat)", msg.fee_satoshis, our_min_fee)));
+ return Err(ChannelError::close(format!("Unable to come to consensus about closing feerate, remote wants something ({} sat) lower than our min fee ({} sat)", msg.fee_satoshis, our_min_fee)));
}
}
} else {
}
pub fn get_cur_holder_commitment_transaction_number(&self) -> u64 {
- self.context.cur_holder_commitment_transaction_number + 1
+ self.context.holder_commitment_point.transaction_number() + 1
}
pub fn get_cur_counterparty_commitment_transaction_number(&self) -> u64 {
debug_assert!(self.context.minimum_depth.unwrap_or(1) > 0);
return true;
}
- if self.context.cur_holder_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 &&
+ if self.context.holder_commitment_point.transaction_number() == INITIAL_COMMITMENT_NUMBER - 1 &&
self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 {
// If we're a 0-conf channel, we'll move beyond AwaitingChannelReady immediately even while
// waiting for the initial monitor persistence. Thus, we check if our commitment
self.context.channel_update_status = status;
}
- fn check_get_channel_ready(&mut self, height: u32) -> Option<msgs::ChannelReady> {
+ fn check_get_channel_ready<L: Deref>(&mut self, height: u32, logger: &L) -> Option<msgs::ChannelReady>
+ where L::Target: Logger
+ {
// Called:
// * always when a new block/transactions are confirmed with the new height
// * when funding is signed with a height of 0
// If we're still pending the signature on a funding transaction, then we're not ready to send a
// channel_ready yet.
if self.context.signer_pending_funding {
+ // TODO: set signer_pending_channel_ready
+ log_debug!(logger, "Can't produce channel_ready: the signer is pending funding.");
return None;
}
false
};
- if need_commitment_update {
- if !self.context.channel_state.is_monitor_update_in_progress() {
- if !self.context.channel_state.is_peer_disconnected() {
- let next_per_commitment_point =
- self.context.holder_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &self.context.secp_ctx);
- return Some(msgs::ChannelReady {
- channel_id: self.context.channel_id,
- next_per_commitment_point,
- short_channel_id_alias: Some(self.context.outbound_scid_alias),
- });
- }
- } else {
- self.context.monitor_pending_channel_ready = true;
- }
+ if !need_commitment_update {
+ log_debug!(logger, "Not producing channel_ready: we do not need a commitment update");
+ return None;
+ }
+
+ if self.context.channel_state.is_monitor_update_in_progress() {
+ log_debug!(logger, "Not producing channel_ready: a monitor update is in progress. Setting monitor_pending_channel_ready.");
+ self.context.monitor_pending_channel_ready = true;
+ return None;
+ }
+
+ if self.context.channel_state.is_peer_disconnected() {
+ log_debug!(logger, "Not producing channel_ready: the peer is disconnected.");
+ return None;
+ }
+
+ // TODO: when get_per_commitment_point becomes async, check if the point is
+ // available, if not, set signer_pending_channel_ready and return None
+
+ Some(self.get_channel_ready())
+ }
+
+ fn get_channel_ready(&self) -> msgs::ChannelReady {
+ debug_assert!(self.context.holder_commitment_point.is_available());
+ msgs::ChannelReady {
+ channel_id: self.context.channel_id(),
+ next_per_commitment_point: self.context.holder_commitment_point.current_point(),
+ short_channel_id_alias: Some(self.context.outbound_scid_alias),
}
- None
}
/// When a transaction is confirmed, we check whether it is or spends the funding transaction
// If we allow 1-conf funding, we may need to check for channel_ready here and
// send it immediately instead of waiting for a best_block_updated call (which
// may have already happened for this block).
- if let Some(channel_ready) = self.check_get_channel_ready(height) {
+ if let Some(channel_ready) = self.check_get_channel_ready(height, logger) {
log_info!(logger, "Sending a channel_ready to our peer for channel {}", &self.context.channel_id);
let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger);
msgs = (Some(channel_ready), announcement_sigs);
self.context.update_time_counter = cmp::max(self.context.update_time_counter, highest_header_time);
- if let Some(channel_ready) = self.check_get_channel_ready(height) {
+ if let Some(channel_ready) = self.check_get_channel_ready(height, logger) {
let announcement_sigs = if let Some((chain_hash, node_signer, user_config)) = chain_node_signer {
self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger)
} else { None };
let msghash = hash_to_message!(&Sha256d::hash(&announcement.encode()[..])[..]);
if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.node_signature, &self.context.get_counterparty_node_id()).is_err() {
- return Err(ChannelError::Close(format!(
+ return Err(ChannelError::close(format!(
"Bad announcement_signatures. Failed to verify node_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_node_key is {:?}",
&announcement, self.context.get_counterparty_node_id())));
}
if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.bitcoin_signature, self.context.counterparty_funding_pubkey()).is_err() {
- return Err(ChannelError::Close(format!(
+ return Err(ChannelError::close(format!(
"Bad announcement_signatures. Failed to verify bitcoin_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_bitcoin_key is ({:?})",
&announcement, self.context.counterparty_funding_pubkey())));
}
// next_local_commitment_number is the next commitment_signed number we expect to
// receive (indicating if they need to resend one that we missed).
- next_local_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number,
+ next_local_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.holder_commitment_point.transaction_number(),
// We have to set next_remote_commitment_number to the next revoke_and_ack we expect to
// receive, however we track it by the next commitment number for a remote transaction
// (which is one further, as they always revoke previous commitment transaction, not
}
if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
- self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
+ self.context.holder_commitment_point.transaction_number() != INITIAL_COMMITMENT_NUMBER {
panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
}
/// Returns true if we can resume the channel by sending the [`msgs::OpenChannel`] again.
pub fn is_resumable(&self) -> bool {
!self.context.have_received_message() &&
- self.context.cur_holder_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER
+ self.context.holder_commitment_point.transaction_number() == INITIAL_COMMITMENT_NUMBER
}
pub fn get_open_channel(&self, chain_hash: ChainHash) -> msgs::OpenChannel {
panic!("Cannot generate an open_channel after we've moved forward");
}
- if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
+ if self.context.holder_commitment_point.transaction_number() != INITIAL_COMMITMENT_NUMBER {
panic!("Tried to send an open_channel for a channel that has already advanced");
}
- let first_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
+ debug_assert!(self.context.holder_commitment_point.is_available());
+ let first_per_commitment_point = self.context.holder_commitment_point.current_point();
let keys = self.context.get_holder_pubkeys();
msgs::OpenChannel {
// Check sanity of message fields:
if !self.context.is_outbound() {
- return Err(ChannelError::Close("Got an accept_channel message from an inbound peer".to_owned()));
+ return Err(ChannelError::close("Got an accept_channel message from an inbound peer".to_owned()));
}
if !matches!(self.context.channel_state, ChannelState::NegotiatingFunding(flags) if flags == NegotiatingFundingFlags::OUR_INIT_SENT) {
- return Err(ChannelError::Close("Got an accept_channel message at a strange time".to_owned()));
+ return Err(ChannelError::close("Got an accept_channel message at a strange time".to_owned()));
}
if msg.common_fields.dust_limit_satoshis > 21000000 * 100000000 {
- return Err(ChannelError::Close(format!("Peer never wants payout outputs? dust_limit_satoshis was {}", msg.common_fields.dust_limit_satoshis)));
+ return Err(ChannelError::close(format!("Peer never wants payout outputs? dust_limit_satoshis was {}", msg.common_fields.dust_limit_satoshis)));
}
if msg.channel_reserve_satoshis > self.context.channel_value_satoshis {
- return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than ({})", msg.channel_reserve_satoshis, self.context.channel_value_satoshis)));
+ return Err(ChannelError::close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than ({})", msg.channel_reserve_satoshis, self.context.channel_value_satoshis)));
}
if msg.common_fields.dust_limit_satoshis > self.context.holder_selected_channel_reserve_satoshis {
- return Err(ChannelError::Close(format!("Dust limit ({}) is bigger than our channel reserve ({})", msg.common_fields.dust_limit_satoshis, self.context.holder_selected_channel_reserve_satoshis)));
+ return Err(ChannelError::close(format!("Dust limit ({}) is bigger than our channel reserve ({})", msg.common_fields.dust_limit_satoshis, self.context.holder_selected_channel_reserve_satoshis)));
}
if msg.channel_reserve_satoshis > self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis {
- return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than channel value minus our reserve ({})",
+ return Err(ChannelError::close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than channel value minus our reserve ({})",
msg.channel_reserve_satoshis, self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis)));
}
let full_channel_value_msat = (self.context.channel_value_satoshis - msg.channel_reserve_satoshis) * 1000;
if msg.common_fields.htlc_minimum_msat >= full_channel_value_msat {
- return Err(ChannelError::Close(format!("Minimum htlc value ({}) is full channel value ({})", msg.common_fields.htlc_minimum_msat, full_channel_value_msat)));
+ return Err(ChannelError::close(format!("Minimum htlc value ({}) is full channel value ({})", msg.common_fields.htlc_minimum_msat, full_channel_value_msat)));
}
let max_delay_acceptable = u16::min(peer_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
if msg.common_fields.to_self_delay > max_delay_acceptable {
- return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_delay_acceptable, msg.common_fields.to_self_delay)));
+ return Err(ChannelError::close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_delay_acceptable, msg.common_fields.to_self_delay)));
}
if msg.common_fields.max_accepted_htlcs < 1 {
- return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
+ return Err(ChannelError::close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
}
if msg.common_fields.max_accepted_htlcs > MAX_HTLCS {
- return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", msg.common_fields.max_accepted_htlcs, MAX_HTLCS)));
+ return Err(ChannelError::close(format!("max_accepted_htlcs was {}. It must not be larger than {}", msg.common_fields.max_accepted_htlcs, MAX_HTLCS)));
}
// Now check against optional parameters as set by config...
if msg.common_fields.htlc_minimum_msat > peer_limits.max_htlc_minimum_msat {
- return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.common_fields.htlc_minimum_msat, peer_limits.max_htlc_minimum_msat)));
+ return Err(ChannelError::close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.common_fields.htlc_minimum_msat, peer_limits.max_htlc_minimum_msat)));
}
if msg.common_fields.max_htlc_value_in_flight_msat < peer_limits.min_max_htlc_value_in_flight_msat {
- return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.common_fields.max_htlc_value_in_flight_msat, peer_limits.min_max_htlc_value_in_flight_msat)));
+ return Err(ChannelError::close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.common_fields.max_htlc_value_in_flight_msat, peer_limits.min_max_htlc_value_in_flight_msat)));
}
if msg.channel_reserve_satoshis > peer_limits.max_channel_reserve_satoshis {
- return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, peer_limits.max_channel_reserve_satoshis)));
+ return Err(ChannelError::close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, peer_limits.max_channel_reserve_satoshis)));
}
if msg.common_fields.max_accepted_htlcs < peer_limits.min_max_accepted_htlcs {
- return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.common_fields.max_accepted_htlcs, peer_limits.min_max_accepted_htlcs)));
+ return Err(ChannelError::close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.common_fields.max_accepted_htlcs, peer_limits.min_max_accepted_htlcs)));
}
if msg.common_fields.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
- return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.common_fields.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
+ return Err(ChannelError::close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.common_fields.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
}
if msg.common_fields.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
- return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.common_fields.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
+ return Err(ChannelError::close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.common_fields.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
}
if msg.common_fields.minimum_depth > peer_limits.max_minimum_depth {
- return Err(ChannelError::Close(format!("We consider the minimum depth to be unreasonably large. Expected minimum: ({}). Actual: ({})", peer_limits.max_minimum_depth, msg.common_fields.minimum_depth)));
+ return Err(ChannelError::close(format!("We consider the minimum depth to be unreasonably large. Expected minimum: ({}). Actual: ({})", peer_limits.max_minimum_depth, msg.common_fields.minimum_depth)));
}
if let Some(ty) = &msg.common_fields.channel_type {
if *ty != self.context.channel_type {
- return Err(ChannelError::Close("Channel Type in accept_channel didn't match the one sent in open_channel.".to_owned()));
+ return Err(ChannelError::close("Channel Type in accept_channel didn't match the one sent in open_channel.".to_owned()));
}
} else if their_features.supports_channel_type() {
// Assume they've accepted the channel type as they said they understand it.
} else {
let channel_type = ChannelTypeFeatures::from_init(&their_features);
if channel_type != ChannelTypeFeatures::only_static_remote_key() {
- return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
+ return Err(ChannelError::close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
}
self.context.channel_type = channel_type.clone();
self.context.channel_transaction_parameters.channel_type_features = channel_type;
None
} else {
if !script::is_bolt2_compliant(&script, their_features) {
- return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)));
+ return Err(ChannelError::close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)));
}
Some(script.clone())
}
},
// Peer is signaling upfront shutdown but don't opt-out with correct mechanism (a.k.a 0-length script). Peer looks buggy, we fail the channel
&None => {
- return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out".to_owned()));
+ return Err(ChannelError::close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out".to_owned()));
}
}
} else { None };
L::Target: Logger
{
if !self.context.is_outbound() {
- return Err((self, ChannelError::Close("Received funding_signed for an inbound channel?".to_owned())));
+ return Err((self, ChannelError::close("Received funding_signed for an inbound channel?".to_owned())));
}
if !matches!(self.context.channel_state, ChannelState::FundingNegotiated) {
- return Err((self, ChannelError::Close("Received funding_signed in strange state!".to_owned())));
+ return Err((self, ChannelError::close("Received funding_signed in strange state!".to_owned())));
}
if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
- self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
+ self.context.holder_commitment_point.transaction_number() != INITIAL_COMMITMENT_NUMBER {
panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
}
log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
&self.context.channel_id(), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
- let holder_signer = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
- let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &holder_signer, true, false, logger).tx;
+ let holder_signer = self.context.build_holder_transaction_keys();
+ let initial_commitment_tx = self.context.build_commitment_transaction(self.context.holder_commitment_point.transaction_number(), &holder_signer, true, false, logger).tx;
{
let trusted_tx = initial_commitment_tx.trust();
let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
// They sign our commitment transaction, allowing us to broadcast the tx if we wish.
if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) {
- return Err((self, ChannelError::Close("Invalid funding_signed signature from peer".to_owned())));
+ return Err((self, ChannelError::close("Invalid funding_signed signature from peer".to_owned())));
}
}
let validated =
self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, Vec::new());
if validated.is_err() {
- return Err((self, ChannelError::Close("Failed to validate our commitment".to_owned())));
+ return Err((self, ChannelError::close("Failed to validate our commitment".to_owned())));
}
let funding_redeemscript = self.context.get_funding_redeemscript();
} else {
self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new());
}
- self.context.cur_holder_commitment_transaction_number -= 1;
+ self.context.holder_commitment_point.advance(&self.context.holder_signer, &self.context.secp_ctx, logger);
self.context.cur_counterparty_commitment_transaction_number -= 1;
log_info!(logger, "Received funding_signed from peer for channel {}", &self.context.channel_id());
dual_funding_channel_context: None,
};
- let need_channel_ready = channel.check_get_channel_ready(0).is_some();
+ let need_channel_ready = channel.check_get_channel_ready(0, logger).is_some();
channel.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
Ok((channel, channel_monitor))
}
) -> Result<ChannelTypeFeatures, ChannelError> {
if let Some(channel_type) = &common_fields.channel_type {
if channel_type.supports_any_optional_bits() {
- return Err(ChannelError::Close("Channel Type field contained optional bits - this is not allowed".to_owned()));
+ return Err(ChannelError::close("Channel Type field contained optional bits - this is not allowed".to_owned()));
}
// We only support the channel types defined by the `ChannelManager` in
// `provided_channel_type_features`. The channel type must always support
// `static_remote_key`.
if !channel_type.requires_static_remote_key() {
- return Err(ChannelError::Close("Channel Type was not understood - we require static remote key".to_owned()));
+ return Err(ChannelError::close("Channel Type was not understood - we require static remote key".to_owned()));
}
// Make sure we support all of the features behind the channel type.
if !channel_type.is_subset(our_supported_features) {
- return Err(ChannelError::Close("Channel Type contains unsupported features".to_owned()));
+ return Err(ChannelError::close("Channel Type contains unsupported features".to_owned()));
}
let announced_channel = if (common_fields.channel_flags & 1) == 1 { true } else { false };
if channel_type.requires_scid_privacy() && announced_channel {
- return Err(ChannelError::Close("SCID Alias/Privacy Channel Type cannot be set on a public channel".to_owned()));
+ return Err(ChannelError::close("SCID Alias/Privacy Channel Type cannot be set on a public channel".to_owned()));
}
Ok(channel_type.clone())
} else {
let channel_type = ChannelTypeFeatures::from_init(&their_features);
if channel_type != ChannelTypeFeatures::only_static_remote_key() {
- return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
+ return Err(ChannelError::close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
}
Ok(channel_type)
}
) {
panic!("Tried to send accept_channel after channel had moved forward");
}
- if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
+ if self.context.holder_commitment_point.transaction_number() != INITIAL_COMMITMENT_NUMBER {
panic!("Tried to send an accept_channel for a channel that has already advanced");
}
///
/// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
fn generate_accept_channel_message(&self) -> msgs::AcceptChannel {
- let first_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
+ debug_assert!(self.context.holder_commitment_point.is_available());
+ let first_per_commitment_point = self.context.holder_commitment_point.current_point();
let keys = self.context.get_holder_pubkeys();
msgs::AcceptChannel {
fn check_funding_created_signature<L: Deref>(&mut self, sig: &Signature, logger: &L) -> Result<CommitmentTransaction, ChannelError> where L::Target: Logger {
let funding_script = self.context.get_funding_redeemscript();
- let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
- let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger).tx;
+ let keys = self.context.build_holder_transaction_keys();
+ let initial_commitment_tx = self.context.build_commitment_transaction(self.context.holder_commitment_point.transaction_number(), &keys, true, false, logger).tx;
let trusted_tx = initial_commitment_tx.trust();
let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
L::Target: Logger
{
if self.context.is_outbound() {
- return Err((self, ChannelError::Close("Received funding_created for an outbound channel?".to_owned())));
+ return Err((self, ChannelError::close("Received funding_created for an outbound channel?".to_owned())));
}
if !matches!(
self.context.channel_state, ChannelState::NegotiatingFunding(flags)
// BOLT 2 says that if we disconnect before we send funding_signed we SHOULD NOT
// remember the channel, so it's safe to just send an error_message here and drop the
// channel.
- return Err((self, ChannelError::Close("Received funding_created after we got the channel!".to_owned())));
+ return Err((self, ChannelError::close("Received funding_created after we got the channel!".to_owned())));
}
if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
- self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
+ self.context.holder_commitment_point.transaction_number() != INITIAL_COMMITMENT_NUMBER {
panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
}
);
if let Err(_) = self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, Vec::new()) {
- return Err((self, ChannelError::Close("Failed to validate our commitment".to_owned())));
+ return Err((self, ChannelError::close("Failed to validate our commitment".to_owned())));
}
// Now that we're past error-generating stuff, update our local state:
self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new());
self.context.channel_id = ChannelId::v1_from_funding_outpoint(funding_txo);
self.context.cur_counterparty_commitment_transaction_number -= 1;
- self.context.cur_holder_commitment_transaction_number -= 1;
+ self.context.holder_commitment_point.advance(&self.context.holder_signer, &self.context.secp_ctx, logger);
let (counterparty_initial_commitment_tx, funding_signed) = self.context.get_funding_signed_msg(logger);
#[cfg(any(dual_funding, splicing))]
dual_funding_channel_context: None,
};
- let need_channel_ready = channel.check_get_channel_ready(0).is_some();
+ let need_channel_ready = channel.check_get_channel_ready(0, logger).is_some();
channel.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
Ok((channel, funding_signed, channel_monitor))
debug_assert!(false, "Cannot generate an open_channel2 after we've moved forward");
}
- if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
+ if self.context.holder_commitment_point.transaction_number() != INITIAL_COMMITMENT_NUMBER {
debug_assert!(false, "Tried to send an open_channel2 for a channel that has already advanced");
}
let first_per_commitment_point = self.context.holder_signer.as_ref()
- .get_per_commitment_point(self.context.cur_holder_commitment_transaction_number,
+ .get_per_commitment_point(self.context.holder_commitment_point.transaction_number(),
&self.context.secp_ctx);
let second_per_commitment_point = self.context.holder_signer.as_ref()
- .get_per_commitment_point(self.context.cur_holder_commitment_transaction_number - 1,
+ .get_per_commitment_point(self.context.holder_commitment_point.transaction_number() - 1,
&self.context.secp_ctx);
let keys = self.context.get_holder_pubkeys();
// First check the channel type is known, failing before we do anything else if we don't
// support this channel type.
if msg.common_fields.channel_type.is_none() {
- return Err(ChannelError::Close(format!("Rejecting V2 channel {} missing channel_type",
+ return Err(ChannelError::close(format!("Rejecting V2 channel {} missing channel_type",
msg.common_fields.temporary_channel_id)))
}
let channel_type = channel_type_from_open_channel(&msg.common_fields, their_features, our_supported_features)?;
) {
debug_assert!(false, "Tried to send accept_channel2 after channel had moved forward");
}
- if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
+ if self.context.holder_commitment_point.transaction_number() != INITIAL_COMMITMENT_NUMBER {
debug_assert!(false, "Tried to send an accept_channel2 for a channel that has already advanced");
}
/// [`msgs::AcceptChannelV2`]: crate::ln::msgs::AcceptChannelV2
fn generate_accept_channel_v2_message(&self) -> msgs::AcceptChannelV2 {
let first_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(
- self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
+ self.context.holder_commitment_point.transaction_number(), &self.context.secp_ctx);
let second_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(
- self.context.cur_holder_commitment_transaction_number - 1, &self.context.secp_ctx);
+ self.context.holder_commitment_point.transaction_number() - 1, &self.context.secp_ctx);
let keys = self.context.get_holder_pubkeys();
msgs::AcceptChannelV2 {
}
self.context.destination_script.write(writer)?;
- self.context.cur_holder_commitment_transaction_number.write(writer)?;
+ self.context.holder_commitment_point.transaction_number().write(writer)?;
self.context.cur_counterparty_commitment_transaction_number.write(writer)?;
self.context.value_to_self_msat.write(writer)?;
monitor_pending_update_adds = Some(&self.context.monitor_pending_update_adds);
}
+ // `current_point` will become optional when async signing is implemented.
+ let cur_holder_commitment_point = Some(self.context.holder_commitment_point.current_point());
+ let next_holder_commitment_point = self.context.holder_commitment_point.next_point();
+
write_tlv_fields!(writer, {
(0, self.context.announcement_sigs, option),
// minimum_depth and counterparty_selected_channel_reserve_satoshis used to have a
(39, pending_outbound_blinding_points, optional_vec),
(41, holding_cell_blinding_points, optional_vec),
(43, malformed_htlcs, optional_vec), // Added in 0.0.119
- // 45 and 47 are reserved for async signing
+ (45, cur_holder_commitment_point, option),
+ (47, next_holder_commitment_point, option),
(49, self.context.local_initiated_shutdown, option), // Added in 0.0.122
});
let mut malformed_htlcs: Option<Vec<(u64, u16, [u8; 32])>> = None;
let mut monitor_pending_update_adds: Option<Vec<msgs::UpdateAddHTLC>> = None;
+ let mut cur_holder_commitment_point_opt: Option<PublicKey> = None;
+ let mut next_holder_commitment_point_opt: Option<PublicKey> = None;
+
read_tlv_fields!(reader, {
(0, announcement_sigs, option),
(1, minimum_depth, option),
(39, pending_outbound_blinding_points_opt, optional_vec),
(41, holding_cell_blinding_points_opt, optional_vec),
(43, malformed_htlcs, optional_vec), // Added in 0.0.119
- // 45 and 47 are reserved for async signing
+ (45, cur_holder_commitment_point_opt, option),
+ (47, next_holder_commitment_point_opt, option),
(49, local_initiated_shutdown, option),
});
}
}
+ // If we're restoring this channel for the first time after an upgrade, then we require that the
+ // signer be available so that we can immediately populate the current commitment point. Channel
+ // restoration will fail if this is not possible.
+ let holder_commitment_point = match (cur_holder_commitment_point_opt, next_holder_commitment_point_opt) {
+ (Some(current), Some(next)) => HolderCommitmentPoint::Available {
+ transaction_number: cur_holder_commitment_transaction_number, current, next
+ },
+ (Some(current), _) => HolderCommitmentPoint::Available {
+ transaction_number: cur_holder_commitment_transaction_number, current,
+ next: holder_signer.get_per_commitment_point(cur_holder_commitment_transaction_number - 1, &secp_ctx),
+ },
+ (_, _) => HolderCommitmentPoint::Available {
+ transaction_number: cur_holder_commitment_transaction_number,
+ current: holder_signer.get_per_commitment_point(cur_holder_commitment_transaction_number, &secp_ctx),
+ next: holder_signer.get_per_commitment_point(cur_holder_commitment_transaction_number - 1, &secp_ctx),
+ },
+ };
+
Ok(Channel {
context: ChannelContext {
user_id,
shutdown_scriptpubkey,
destination_script,
- cur_holder_commitment_transaction_number,
+ holder_commitment_point,
cur_counterparty_commitment_transaction_number,
value_to_self_msat,
// Clear the ChannelState::WaitingForBatch only when called by ChannelManager.
node_a_chan.set_batch_ready();
assert_eq!(node_a_chan.context.channel_state, ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::THEIR_CHANNEL_READY));
- assert!(node_a_chan.check_get_channel_ready(0).is_some());
+ assert!(node_a_chan.check_get_channel_ready(0, &&logger).is_some());
}
}
err: msg,
action: msgs::ErrorAction::IgnoreError,
},
- ChannelError::Close(msg) => LightningError {
+ ChannelError::Close((msg, _reason)) => LightningError {
err: msg.clone(),
action: msgs::ErrorAction::SendErrorMessage {
msg: msgs::ErrorMessage {
/// accepted. An unaccepted channel that exceeds this limit will be abandoned.
const UNACCEPTED_INBOUND_CHANNEL_AGE_LIMIT_TICKS: i32 = 2;
+/// The number of blocks of historical feerate estimates we keep around and consider when deciding
+/// to force-close a channel for having too-low fees. Also the number of blocks we have to see
+/// after startup before we consider force-closing channels for having too-low fees.
+pub(super) const FEERATE_TRACKING_BLOCKS: usize = 144;
+
/// Stores a PaymentSecret and any other data we may need to validate an inbound payment is
/// actually ours and not some duplicate HTLC sent to us by a node along the route.
///
/// #
/// # fn example<T: AChannelManager>(channel_manager: T) -> Result<(), Bolt12SemanticError> {
/// # let channel_manager = channel_manager.get_cm();
+/// # let absolute_expiry = None;
/// let offer = channel_manager
-/// .create_offer_builder()?
+/// .create_offer_builder(absolute_expiry)?
/// # ;
/// # // Needed for compiling for c_bindings
/// # let builder: lightning::offers::offer::OfferBuilder<_, _> = offer.into();
/// Tracks the message events that are to be broadcasted when we are connected to some peer.
pending_broadcast_messages: Mutex<Vec<MessageSendEvent>>,
+ /// We only want to force-close our channels on peers based on stale feerates when we're
+ /// confident the feerate on the channel is *really* stale, not one that just recently became stale.
+ /// Thus, we store the fee estimates we had as of the last [`FEERATE_TRACKING_BLOCKS`] blocks
+ /// (after startup completed) here, and only force-close when channels have a lower feerate
+ /// than we predicted any time in the last [`FEERATE_TRACKING_BLOCKS`] blocks.
+ ///
+ /// We only keep this in memory as we assume any feerates we receive immediately after startup
+ /// may be bunk (as they often are if Bitcoin Core crashes) and want to delay taking any
+ /// actions for a day anyway.
+ ///
+ /// The first element in the pair is the
+ /// [`ConfirmationTarget::MinAllowedAnchorChannelRemoteFee`] estimate, the second the
+ /// [`ConfirmationTarget::MinAllowedNonAnchorChannelRemoteFee`] estimate.
+ last_days_feerates: Mutex<VecDeque<(u32, u32)>>,
+
entropy_source: ES,
node_signer: NS,
signer_provider: SP,
/// many peers we reject new (inbound) connections.
const MAX_NO_CHANNEL_PEERS: usize = 250;
+/// The maximum expiration from the current time where an [`Offer`] or [`Refund`] is considered
+/// short-lived, while anything with a greater expiration is considered long-lived.
+///
+/// Using [`ChannelManager::create_offer_builder`] or [`ChannelManager::create_refund_builder`]
+/// will include a [`BlindedPath`] created using:
+/// - [`MessageRouter::create_compact_blinded_paths`] when short-lived, and
+/// - [`MessageRouter::create_blinded_paths`] when long-lived.
+///
+/// Using compact [`BlindedPath`]s may provide better privacy as the [`MessageRouter`] could select
+/// more hops. However, since they use short channel ids instead of pubkeys, they are more likely to
+/// become invalid over time as channels are closed. Thus, they are only suitable for short-term use.
+pub const MAX_SHORT_LIVED_RELATIVE_EXPIRY: Duration = Duration::from_secs(60 * 60 * 24);
+
/// Used by [`ChannelManager::list_recent_payments`] to express the status of recent payments.
/// These include payments that have yet to find a successful path, or have unresolved HTLCs.
#[derive(Debug, PartialEq)]
ChannelError::Ignore(msg) => {
(false, MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore(msg), *$channel_id))
},
- ChannelError::Close(msg) => {
+ ChannelError::Close((msg, reason)) => {
let logger = WithChannelContext::from(&$self.logger, &$channel.context, None);
log_error!(logger, "Closing channel {} due to close-required error: {}", $channel_id, msg);
update_maps_on_chan_removal!($self, $channel.context);
- let reason = ClosureReason::ProcessingError { err: msg.clone() };
let shutdown_res = $channel.context.force_shutdown(true, reason);
let err =
MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, shutdown_res, $channel_update);
pending_offers_messages: Mutex::new(Vec::new()),
pending_broadcast_messages: Mutex::new(Vec::new()),
+ last_days_feerates: Mutex::new(VecDeque::new()),
+
entropy_source,
node_signer,
signer_provider,
Some(ChannelPhase::UnfundedOutboundV1(mut chan)) => {
macro_rules! close_chan { ($err: expr, $api_err: expr, $chan: expr) => { {
let counterparty;
- let err = if let ChannelError::Close(msg) = $err {
+ let err = if let ChannelError::Close((msg, reason)) = $err {
let channel_id = $chan.context.channel_id();
counterparty = chan.context.get_counterparty_node_id();
- let reason = ClosureReason::ProcessingError { err: msg.clone() };
let shutdown_res = $chan.context.force_shutdown(false, reason);
MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, shutdown_res, None)
} else { unreachable!(); };
match find_funding_output(&chan, &funding_transaction) {
Ok(found_funding_txo) => funding_txo = found_funding_txo,
Err(err) => {
- let chan_err = ChannelError::Close(err.to_owned());
+ let chan_err = ChannelError::close(err.to_owned());
let api_err = APIError::APIMisuseError { err: err.to_owned() };
return close_chan!(chan_err, api_err, chan);
},
},
Some(mut phase) => {
let err_msg = format!("Got an unexpected funding_created message from peer with counterparty_node_id {}", counterparty_node_id);
- let err = ChannelError::Close(err_msg);
+ let err = ChannelError::close(err_msg);
return Err(convert_chan_phase_err!(self, err, &mut phase, &msg.temporary_channel_id).1);
},
None => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id))
// `update_maps_on_chan_removal`), we'll remove the existing channel
// from `outpoint_to_peer`. Thus, we must first unset the funding outpoint
// on the channel.
- let err = ChannelError::Close($err.to_owned());
+ let err = ChannelError::close($err.to_owned());
chan.unset_funding_info(msg.temporary_channel_id);
return Err(convert_chan_phase_err!(self, err, chan, &funded_channel_id, UNFUNDED_CHANNEL).1);
} } }
} else { unreachable!(); }
Ok(())
} else {
- let e = ChannelError::Close("Channel funding outpoint was a duplicate".to_owned());
+ let e = ChannelError::close("Channel funding outpoint was a duplicate".to_owned());
// We weren't able to watch the channel to begin with, so no
// updates should be made on it. Previously, full_stack_target
// found an (unreachable) panic when the monitor update contained
Ok(())
} else {
- try_chan_phase_entry!(self, Err(ChannelError::Close(
+ try_chan_phase_entry!(self, Err(ChannelError::close(
"Got a channel_ready message for an unfunded channel!".into())), chan_phase_entry)
}
},
(tx, Some(remove_channel_phase!(self, chan_phase_entry)), shutdown_result)
} else { (tx, None, shutdown_result) }
} else {
- return try_chan_phase_entry!(self, Err(ChannelError::Close(
+ return try_chan_phase_entry!(self, Err(ChannelError::close(
"Got a closing_signed message for an unfunded channel!".into())), chan_phase_entry);
}
},
}
try_chan_phase_entry!(self, chan.update_add_htlc(&msg, pending_forward_info, &self.fee_estimator), chan_phase_entry);
} else {
- return try_chan_phase_entry!(self, Err(ChannelError::Close(
+ return try_chan_phase_entry!(self, Err(ChannelError::close(
"Got an update_add_htlc message for an unfunded channel!".into())), chan_phase_entry);
}
},
next_user_channel_id = chan.context.get_user_id();
res
} else {
- return try_chan_phase_entry!(self, Err(ChannelError::Close(
+ return try_chan_phase_entry!(self, Err(ChannelError::close(
"Got an update_fulfill_htlc message for an unfunded channel!".into())), chan_phase_entry);
}
},
if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
try_chan_phase_entry!(self, chan.update_fail_htlc(&msg, HTLCFailReason::from_msg(msg)), chan_phase_entry);
} else {
- return try_chan_phase_entry!(self, Err(ChannelError::Close(
+ return try_chan_phase_entry!(self, Err(ChannelError::close(
"Got an update_fail_htlc message for an unfunded channel!".into())), chan_phase_entry);
}
},
match peer_state.channel_by_id.entry(msg.channel_id) {
hash_map::Entry::Occupied(mut chan_phase_entry) => {
if (msg.failure_code & 0x8000) == 0 {
- let chan_err: ChannelError = ChannelError::Close("Got update_fail_malformed_htlc with BADONION not set".to_owned());
+ let chan_err = ChannelError::close("Got update_fail_malformed_htlc with BADONION not set".to_owned());
try_chan_phase_entry!(self, Err(chan_err), chan_phase_entry);
}
if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
try_chan_phase_entry!(self, chan.update_fail_malformed_htlc(&msg, HTLCFailReason::reason(msg.failure_code, msg.sha256_of_onion.to_vec())), chan_phase_entry);
} else {
- return try_chan_phase_entry!(self, Err(ChannelError::Close(
+ return try_chan_phase_entry!(self, Err(ChannelError::close(
"Got an update_fail_malformed_htlc message for an unfunded channel!".into())), chan_phase_entry);
}
Ok(())
}
Ok(())
} else {
- return try_chan_phase_entry!(self, Err(ChannelError::Close(
+ return try_chan_phase_entry!(self, Err(ChannelError::close(
"Got a commitment_signed message for an unfunded channel!".into())), chan_phase_entry);
}
},
}
htlcs_to_fail
} else {
- return try_chan_phase_entry!(self, Err(ChannelError::Close(
+ return try_chan_phase_entry!(self, Err(ChannelError::close(
"Got a revoke_and_ack message for an unfunded channel!".into())), chan_phase_entry);
}
},
let logger = WithChannelContext::from(&self.logger, &chan.context, None);
try_chan_phase_entry!(self, chan.update_fee(&self.fee_estimator, &msg, &&logger), chan_phase_entry);
} else {
- return try_chan_phase_entry!(self, Err(ChannelError::Close(
+ return try_chan_phase_entry!(self, Err(ChannelError::close(
"Got an update_fee message for an unfunded channel!".into())), chan_phase_entry);
}
},
update_msg: Some(self.get_channel_update_for_broadcast(chan).unwrap()),
});
} else {
- return try_chan_phase_entry!(self, Err(ChannelError::Close(
+ return try_chan_phase_entry!(self, Err(ChannelError::close(
"Got an announcement_signatures message for an unfunded channel!".into())), chan_phase_entry);
}
},
}
}
} else {
- return try_chan_phase_entry!(self, Err(ChannelError::Close(
+ return try_chan_phase_entry!(self, Err(ChannelError::close(
"Got a channel_update for an unfunded channel!".into())), chan_phase_entry);
}
},
}
need_lnd_workaround
} else {
- return try_chan_phase_entry!(self, Err(ChannelError::Close(
+ return try_chan_phase_entry!(self, Err(ChannelError::close(
"Got a channel_reestablish message for an unfunded channel!".into())), chan_phase_entry);
}
},
macro_rules! create_offer_builder { ($self: ident, $builder: ty) => {
/// Creates an [`OfferBuilder`] such that the [`Offer`] it builds is recognized by the
- /// [`ChannelManager`] when handling [`InvoiceRequest`] messages for the offer. The offer will
- /// not have an expiration unless otherwise set on the builder.
+ /// [`ChannelManager`] when handling [`InvoiceRequest`] messages for the offer. The offer's
+ /// expiration will be `absolute_expiry` if `Some`, otherwise it will not expire.
///
/// # Privacy
///
- /// Uses [`MessageRouter::create_blinded_paths`] to construct a [`BlindedPath`] for the offer.
- /// However, if one is not found, uses a one-hop [`BlindedPath`] with
- /// [`ChannelManager::get_our_node_id`] as the introduction node instead. In the latter case,
- /// the node must be announced, otherwise, there is no way to find a path to the introduction in
- /// order to send the [`InvoiceRequest`].
+ /// Uses [`MessageRouter`] to construct a [`BlindedPath`] for the offer based on the given
+ /// `absolute_expiry` according to [`MAX_SHORT_LIVED_RELATIVE_EXPIRY`]. See those docs for
+ /// privacy implications as well as those of the parameterized [`Router`], which implements
+ /// [`MessageRouter`].
///
/// Also, uses a derived signing pubkey in the offer for recipient privacy.
///
///
/// [`Offer`]: crate::offers::offer::Offer
/// [`InvoiceRequest`]: crate::offers::invoice_request::InvoiceRequest
- pub fn create_offer_builder(&$self) -> Result<$builder, Bolt12SemanticError> {
+ pub fn create_offer_builder(
+ &$self, absolute_expiry: Option<Duration>
+ ) -> Result<$builder, Bolt12SemanticError> {
let node_id = $self.get_our_node_id();
let expanded_key = &$self.inbound_payment_key;
let entropy = &*$self.entropy_source;
let secp_ctx = &$self.secp_ctx;
- let path = $self.create_blinded_path().map_err(|_| Bolt12SemanticError::MissingPaths)?;
+ let path = $self.create_blinded_path_using_absolute_expiry(absolute_expiry)
+ .map_err(|_| Bolt12SemanticError::MissingPaths)?;
let builder = OfferBuilder::deriving_signing_pubkey(
node_id, expanded_key, entropy, secp_ctx
)
.chain_hash($self.chain_hash)
.path(path);
+ let builder = match absolute_expiry {
+ None => builder,
+ Some(absolute_expiry) => builder.absolute_expiry(absolute_expiry),
+ };
+
Ok(builder.into())
}
} }
///
/// # Privacy
///
- /// Uses [`MessageRouter::create_blinded_paths`] to construct a [`BlindedPath`] for the refund.
- /// However, if one is not found, uses a one-hop [`BlindedPath`] with
- /// [`ChannelManager::get_our_node_id`] as the introduction node instead. In the latter case,
- /// the node must be announced, otherwise, there is no way to find a path to the introduction in
- /// order to send the [`Bolt12Invoice`].
+ /// Uses [`MessageRouter`] to construct a [`BlindedPath`] for the refund based on the given
+ /// `absolute_expiry` according to [`MAX_SHORT_LIVED_RELATIVE_EXPIRY`]. See those docs for
+ /// privacy implications as well as those of the parameterized [`Router`], which implements
+ /// [`MessageRouter`].
///
/// Also, uses a derived payer id in the refund for payer privacy.
///
let entropy = &*$self.entropy_source;
let secp_ctx = &$self.secp_ctx;
- let path = $self.create_blinded_path().map_err(|_| Bolt12SemanticError::MissingPaths)?;
+ let path = $self.create_blinded_path_using_absolute_expiry(Some(absolute_expiry))
+ .map_err(|_| Bolt12SemanticError::MissingPaths)?;
let builder = RefundBuilder::deriving_payer_id(
node_id, expanded_key, entropy, secp_ctx, amount_msats, payment_id
)?
///
/// # Privacy
///
- /// Uses a one-hop [`BlindedPath`] for the reply path with [`ChannelManager::get_our_node_id`]
- /// as the introduction node and a derived payer id for payer privacy. As such, currently, the
- /// node must be announced. Otherwise, there is no way to find a path to the introduction node
- /// in order to send the [`Bolt12Invoice`].
+ /// For payer privacy, uses a derived payer id and uses [`MessageRouter::create_blinded_paths`]
+ /// to construct a [`BlindedPath`] for the reply path. For further privacy implications, see the
+ /// docs of the parameterized [`Router`], which implements [`MessageRouter`].
///
/// # Limitations
///
inbound_payment::get_payment_preimage(payment_hash, payment_secret, &self.inbound_payment_key)
}
+ /// Creates a blinded path by delegating to [`MessageRouter`] based on the path's intended
+ /// lifetime.
+ ///
+ /// Whether or not the path is compact depends on whether the path is short-lived or long-lived,
+ /// respectively, based on the given `absolute_expiry` as seconds since the Unix epoch. See
+ /// [`MAX_SHORT_LIVED_RELATIVE_EXPIRY`].
+ fn create_blinded_path_using_absolute_expiry(
+ &self, absolute_expiry: Option<Duration>
+ ) -> Result<BlindedPath, ()> {
+ let now = self.duration_since_epoch();
+ let max_short_lived_absolute_expiry = now.saturating_add(MAX_SHORT_LIVED_RELATIVE_EXPIRY);
+
+ if absolute_expiry.unwrap_or(Duration::MAX) <= max_short_lived_absolute_expiry {
+ self.create_compact_blinded_path()
+ } else {
+ self.create_blinded_path()
+ }
+ }
+
+ pub(super) fn duration_since_epoch(&self) -> Duration {
+ #[cfg(not(feature = "std"))]
+ let now = Duration::from_secs(
+ self.highest_seen_timestamp.load(Ordering::Acquire) as u64
+ );
+ #[cfg(feature = "std")]
+ let now = std::time::SystemTime::now()
+ .duration_since(std::time::SystemTime::UNIX_EPOCH)
+ .expect("SystemTime::now() should come after SystemTime::UNIX_EPOCH");
+
+ now
+ }
+
/// Creates a blinded path by delegating to [`MessageRouter::create_blinded_paths`].
///
/// Errors if the `MessageRouter` errors or returns an empty `Vec`.
let peers = self.per_peer_state.read().unwrap()
.iter()
.map(|(node_id, peer_state)| (node_id, peer_state.lock().unwrap()))
+ .filter(|(_, peer)| peer.is_connected)
+ .filter(|(_, peer)| peer.latest_features.supports_onion_messages())
+ .map(|(node_id, _)| *node_id)
+ .collect::<Vec<_>>();
+
+ self.router
+ .create_blinded_paths(recipient, peers, secp_ctx)
+ .and_then(|paths| paths.into_iter().next().ok_or(()))
+ }
+
+ /// Creates a blinded path by delegating to [`MessageRouter::create_compact_blinded_paths`].
+ ///
+ /// Errors if the `MessageRouter` errors or returns an empty `Vec`.
+ fn create_compact_blinded_path(&self) -> Result<BlindedPath, ()> {
+ let recipient = self.get_our_node_id();
+ let secp_ctx = &self.secp_ctx;
+
+ let peers = self.per_peer_state.read().unwrap()
+ .iter()
+ .map(|(node_id, peer_state)| (node_id, peer_state.lock().unwrap()))
+ .filter(|(_, peer)| peer.is_connected)
.filter(|(_, peer)| peer.latest_features.supports_onion_messages())
.map(|(node_id, peer)| ForwardNode {
node_id: *node_id,
.collect::<Vec<_>>();
self.router
- .create_blinded_paths(recipient, peers, secp_ctx)
+ .create_compact_blinded_paths(recipient, peers, secp_ctx)
.and_then(|paths| paths.into_iter().next().ok_or(()))
}
self, || -> NotifyOption { NotifyOption::DoPersist });
*self.best_block.write().unwrap() = BestBlock::new(block_hash, height);
- self.do_chain_event(Some(height), |channel| channel.best_block_updated(height, header.time, self.chain_hash, &self.node_signer, &self.default_configuration, &&WithChannelContext::from(&self.logger, &channel.context, None)));
+ let mut min_anchor_feerate = None;
+ let mut min_non_anchor_feerate = None;
+ if self.background_events_processed_since_startup.load(Ordering::Relaxed) {
+ // If we're past the startup phase, update our feerate cache
+ let mut last_days_feerates = self.last_days_feerates.lock().unwrap();
+ if last_days_feerates.len() >= FEERATE_TRACKING_BLOCKS {
+ last_days_feerates.pop_front();
+ }
+ let anchor_feerate = self.fee_estimator
+ .bounded_sat_per_1000_weight(ConfirmationTarget::MinAllowedAnchorChannelRemoteFee);
+ let non_anchor_feerate = self.fee_estimator
+ .bounded_sat_per_1000_weight(ConfirmationTarget::MinAllowedNonAnchorChannelRemoteFee);
+ last_days_feerates.push_back((anchor_feerate, non_anchor_feerate));
+ if last_days_feerates.len() >= FEERATE_TRACKING_BLOCKS {
+ min_anchor_feerate = last_days_feerates.iter().map(|(f, _)| f).min().copied();
+ min_non_anchor_feerate = last_days_feerates.iter().map(|(_, f)| f).min().copied();
+ }
+ }
+
+ self.do_chain_event(Some(height), |channel| {
+ let logger = WithChannelContext::from(&self.logger, &channel.context, None);
+ if channel.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
+ if let Some(feerate) = min_anchor_feerate {
+ channel.check_for_stale_feerate(&logger, feerate)?;
+ }
+ } else {
+ if let Some(feerate) = min_non_anchor_feerate {
+ channel.check_for_stale_feerate(&logger, feerate)?;
+ }
+ }
+ channel.best_block_updated(height, header.time, self.chain_hash, &self.node_signer, &self.default_configuration, &&WithChannelContext::from(&self.logger, &channel.context, None))
+ });
macro_rules! max_time {
($timestamp: expr) => {
node_signer: args.node_signer,
signer_provider: args.signer_provider,
+ last_days_feerates: Mutex::new(VecDeque::new()),
+
logger: args.logger,
default_configuration: args.default_config,
};
if let Some(chan_closed) = conditions.expected_blamed_chan_closed {
if let PathFailure::OnPath { network_update: Some(upd) } = failure {
match upd {
- NetworkUpdate::ChannelUpdateMessage { ref msg } if !chan_closed => {
- if let Some(scid) = conditions.expected_blamed_scid {
- assert_eq!(msg.contents.short_channel_id, scid);
- }
- const CHAN_DISABLED_FLAG: u8 = 2;
- assert_eq!(msg.contents.flags & CHAN_DISABLED_FLAG, 0);
- },
- NetworkUpdate::ChannelFailure { short_channel_id, is_permanent } if chan_closed => {
+ NetworkUpdate::ChannelFailure { short_channel_id, is_permanent } => {
if let Some(scid) = conditions.expected_blamed_scid {
assert_eq!(*short_channel_id, scid);
}
- assert!(is_permanent);
+ assert_eq!(*is_permanent, chan_closed);
},
_ => panic!("Unexpected update type"),
}
for i in 0..node_count {
for j in (i+1)..node_count {
- let node_id_i = nodes[i].node.get_our_node_id();
- let node_id_j = nodes[j].node.get_our_node_id();
-
- let init_i = msgs::Init {
- features: nodes[i].init_features(&node_id_j),
- networks: None,
- remote_network_address: None,
- };
- let init_j = msgs::Init {
- features: nodes[j].init_features(&node_id_i),
- networks: None,
- remote_network_address: None,
- };
-
- nodes[i].node.peer_connected(&node_id_j, &init_j, true).unwrap();
- nodes[j].node.peer_connected(&node_id_i, &init_i, false).unwrap();
- nodes[i].onion_messenger.peer_connected(&node_id_j, &init_j, true).unwrap();
- nodes[j].onion_messenger.peer_connected(&node_id_i, &init_i, false).unwrap();
+ connect_nodes(&nodes[i], &nodes[j]);
}
}
nodes
}
+fn connect_nodes<'a, 'b: 'a, 'c: 'b>(node_a: &Node<'a, 'b, 'c>, node_b: &Node<'a, 'b, 'c>) {
+ let node_id_a = node_a.node.get_our_node_id();
+ let node_id_b = node_b.node.get_our_node_id();
+
+ let init_a = msgs::Init {
+ features: node_a.init_features(&node_id_b),
+ networks: None,
+ remote_network_address: None,
+ };
+ let init_b = msgs::Init {
+ features: node_b.init_features(&node_id_a),
+ networks: None,
+ remote_network_address: None,
+ };
+
+ node_a.node.peer_connected(&node_id_b, &init_b, true).unwrap();
+ node_b.node.peer_connected(&node_id_a, &init_a, false).unwrap();
+ node_a.onion_messenger.peer_connected(&node_id_b, &init_b, true).unwrap();
+ node_b.onion_messenger.peer_connected(&node_id_a, &init_a, false).unwrap();
+}
+
pub fn connect_dummy_node<'a, 'b: 'a, 'c: 'b>(node: &Node<'a, 'b, 'c>) {
let node_id_dummy = PublicKey::from_slice(&[2; 33]).unwrap();
pending_cell_htlc_claims, pending_cell_htlc_fails, pending_raa,
pending_responding_commitment_signed, pending_responding_commitment_signed_dup_monitor,
} = args;
- node_a.node.peer_connected(&node_b.node.get_our_node_id(), &msgs::Init {
- features: node_b.node.init_features(), networks: None, remote_network_address: None
- }, true).unwrap();
+ connect_nodes(node_a, node_b);
let reestablish_1 = get_chan_reestablish_msgs!(node_a, node_b);
- node_b.node.peer_connected(&node_a.node.get_our_node_id(), &msgs::Init {
- features: node_a.node.init_features(), networks: None, remote_network_address: None
- }, false).unwrap();
let reestablish_2 = get_chan_reestablish_msgs!(node_b, node_a);
if send_channel_ready.0 {
&low_our_to_self_config, 0, &nodes[0].logger, /*is_0conf=*/false)
{
match error {
- ChannelError::Close(err) => { assert!(regex::Regex::new(r"Configured with an unreasonable our_to_self_delay \(\d+\) putting user funds at risks").unwrap().is_match(err.as_str())); },
+ ChannelError::Close((err, _)) => {
+ let regex = regex::Regex::new(r"Configured with an unreasonable our_to_self_delay \(\d+\) putting user funds at risks").unwrap();
+ assert!(regex.is_match(err.as_str()));
+ },
_ => panic!("Unexpected event"),
}
} else { assert!(false); }
&high_their_to_self_config, 0, &nodes[0].logger, /*is_0conf=*/false)
{
match error {
- ChannelError::Close(err) => { assert!(regex::Regex::new(r"They wanted our payments to be delayed by a needlessly long period\. Upper limit: \d+\. Actual: \d+").unwrap().is_match(err.as_str())); },
+ ChannelError::Close((err, _)) => {
+ let regex = regex::Regex::new(r"They wanted our payments to be delayed by a needlessly long period\. Upper limit: \d+\. Actual: \d+").unwrap();
+ assert!(regex.is_match(err.as_str()));
+ },
_ => panic!("Unexpected event"),
}
} else { assert!(false); }
match events[0] {
MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, .. }, .. } => {
nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_fee.as_ref().unwrap());
- check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError {
- err: "Peer's feerate much too low. Actual: 1000. Our expected lower limit: 5000".to_owned() },
- [nodes[0].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[1], 1, ClosureReason::PeerFeerateTooLow {
+ peer_feerate_sat_per_kw: 1000, required_feerate_sat_per_kw: 5000,
+ }, [nodes[0].node.get_our_node_id()], 100000);
check_closed_broadcast!(nodes[1], true);
check_added_monitors!(nodes[1], 1);
},
use crate::blinded_path::{BlindedPath, IntroductionNode};
use crate::blinded_path::payment::{Bolt12OfferContext, Bolt12RefundContext, PaymentContext};
use crate::events::{Event, MessageSendEventsProvider, PaymentPurpose};
-use crate::ln::channelmanager::{PaymentId, RecentPaymentDetails, Retry, self};
+use crate::ln::channelmanager::{MAX_SHORT_LIVED_RELATIVE_EXPIRY, PaymentId, RecentPaymentDetails, Retry, self};
use crate::ln::functional_test_utils::*;
use crate::ln::msgs::{ChannelMessageHandler, Init, NodeAnnouncement, OnionMessage, OnionMessageHandler, RoutingMessageHandler, SocketAddress, UnsignedGossipMessage, UnsignedNodeAnnouncement};
use crate::offers::invoice::Bolt12Invoice;
announce_node_address(charlie, &[alice, bob, david, &nodes[4], &nodes[5]], tor.clone());
let offer = bob.node
- .create_offer_builder().unwrap()
+ .create_offer_builder(None).unwrap()
.amount_msats(10_000_000)
.build().unwrap();
assert_ne!(offer.signing_pubkey(), Some(bob_id));
announce_node_address(&nodes[5], &[alice, bob, charlie, david, &nodes[4]], tor.clone());
let offer = bob.node
- .create_offer_builder().unwrap()
+ .create_offer_builder(None).unwrap()
.amount_msats(10_000_000)
.build().unwrap();
assert_ne!(offer.signing_pubkey(), Some(bob_id));
disconnect_peers(david, &[bob, &nodes[4], &nodes[5]]);
let offer = bob.node
- .create_offer_builder().unwrap()
+ .create_offer_builder(None).unwrap()
.amount_msats(10_000_000)
.build().unwrap();
assert_ne!(offer.signing_pubkey(), Some(bob_id));
}
}
+/// Checks that blinded paths are compact for short-lived offers.
+#[test]
+fn creates_short_lived_offer() {
+	let chanmon_cfgs = create_chanmon_cfgs(2);
+	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+	create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 10_000_000, 1_000_000_000);
+
+	let alice = &nodes[0];
+	let alice_id = alice.node.get_our_node_id();
+	let bob = &nodes[1];
+
+	// An expiry exactly MAX_SHORT_LIVED_RELATIVE_EXPIRY from now still counts as short-lived.
+	let absolute_expiry = alice.node.duration_since_epoch() + MAX_SHORT_LIVED_RELATIVE_EXPIRY;
+	let offer = alice.node
+		.create_offer_builder(Some(absolute_expiry)).unwrap()
+		.build().unwrap();
+	assert_eq!(offer.absolute_expiry(), Some(absolute_expiry));
+	assert!(!offer.paths().is_empty());
+	for path in offer.paths() {
+		// Compact blinded paths identify the introduction node by a directed SCID rather
+		// than by node id; resolve it and confirm it points back at the offer's creator.
+		let introduction_node_id = resolve_introduction_node(bob, &path);
+		assert_eq!(introduction_node_id, alice_id);
+		assert!(matches!(path.introduction_node, IntroductionNode::DirectedShortChannelId(..)));
+	}
+}
+
+/// Checks that blinded paths are not compact for long-lived offers.
+#[test]
+fn creates_long_lived_offer() {
+	let chanmon_cfgs = create_chanmon_cfgs(2);
+	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+	create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 10_000_000, 1_000_000_000);
+
+	let alice = &nodes[0];
+	let alice_id = alice.node.get_our_node_id();
+
+	// One second past MAX_SHORT_LIVED_RELATIVE_EXPIRY pushes the offer into long-lived
+	// territory, which should yield full (non-compact) blinded paths.
+	let absolute_expiry = alice.node.duration_since_epoch() + MAX_SHORT_LIVED_RELATIVE_EXPIRY
+		+ Duration::from_secs(1);
+	let offer = alice.node
+		.create_offer_builder(Some(absolute_expiry))
+		.unwrap()
+		.build().unwrap();
+	assert_eq!(offer.absolute_expiry(), Some(absolute_expiry));
+	assert!(!offer.paths().is_empty());
+	for path in offer.paths() {
+		// Long-lived offers use the introduction node's full node id, not a directed SCID.
+		assert_eq!(path.introduction_node, IntroductionNode::NodeId(alice_id));
+	}
+
+	// An offer with no expiry at all is likewise treated as long-lived.
+	let offer = alice.node
+		.create_offer_builder(None).unwrap()
+		.build().unwrap();
+	assert_eq!(offer.absolute_expiry(), None);
+	assert!(!offer.paths().is_empty());
+	for path in offer.paths() {
+		assert_eq!(path.introduction_node, IntroductionNode::NodeId(alice_id));
+	}
+}
+
+/// Checks that blinded paths are compact for short-lived refunds.
+#[test]
+fn creates_short_lived_refund() {
+	let chanmon_cfgs = create_chanmon_cfgs(2);
+	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+	create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 10_000_000, 1_000_000_000);
+
+	let alice = &nodes[0];
+	let bob = &nodes[1];
+	let bob_id = bob.node.get_our_node_id();
+
+	// An expiry exactly MAX_SHORT_LIVED_RELATIVE_EXPIRY from now still counts as short-lived.
+	let absolute_expiry = bob.node.duration_since_epoch() + MAX_SHORT_LIVED_RELATIVE_EXPIRY;
+	let payment_id = PaymentId([1; 32]);
+	let refund = bob.node
+		.create_refund_builder(10_000_000, absolute_expiry, payment_id, Retry::Attempts(0), None)
+		.unwrap()
+		.build().unwrap();
+	assert_eq!(refund.absolute_expiry(), Some(absolute_expiry));
+	assert!(!refund.paths().is_empty());
+	for path in refund.paths() {
+		// Compact blinded paths identify the introduction node by a directed SCID; resolve
+		// it and confirm it points back at the refund's creator.
+		let introduction_node_id = resolve_introduction_node(alice, &path);
+		assert_eq!(introduction_node_id, bob_id);
+		assert!(matches!(path.introduction_node, IntroductionNode::DirectedShortChannelId(..)));
+	}
+}
+
+/// Checks that blinded paths are not compact for long-lived refunds.
+#[test]
+fn creates_long_lived_refund() {
+	let chanmon_cfgs = create_chanmon_cfgs(2);
+	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+	create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 10_000_000, 1_000_000_000);
+
+	let bob = &nodes[1];
+	let bob_id = bob.node.get_our_node_id();
+
+	// One second past MAX_SHORT_LIVED_RELATIVE_EXPIRY makes the refund long-lived, which
+	// should yield full (non-compact) blinded paths.
+	let absolute_expiry = bob.node.duration_since_epoch() + MAX_SHORT_LIVED_RELATIVE_EXPIRY
+		+ Duration::from_secs(1);
+	let payment_id = PaymentId([1; 32]);
+	let refund = bob.node
+		.create_refund_builder(10_000_000, absolute_expiry, payment_id, Retry::Attempts(0), None)
+		.unwrap()
+		.build().unwrap();
+	assert_eq!(refund.absolute_expiry(), Some(absolute_expiry));
+	assert!(!refund.paths().is_empty());
+	for path in refund.paths() {
+		// Long-lived refunds use the introduction node's full node id, not a directed SCID.
+		assert_eq!(path.introduction_node, IntroductionNode::NodeId(bob_id));
+	}
+}
+
/// Checks that an offer can be paid through blinded paths and that ephemeral pubkeys are used
/// rather than exposing a node's pubkey.
#[test]
disconnect_peers(david, &[bob, &nodes[4], &nodes[5]]);
let offer = alice.node
- .create_offer_builder()
+ .create_offer_builder(None)
.unwrap()
.amount_msats(10_000_000)
.build().unwrap();
assert_ne!(offer.signing_pubkey(), Some(alice_id));
assert!(!offer.paths().is_empty());
for path in offer.paths() {
- let introduction_node_id = resolve_introduction_node(david, &path);
- assert_eq!(introduction_node_id, bob_id);
- assert!(matches!(path.introduction_node, IntroductionNode::DirectedShortChannelId(..)));
+ assert_eq!(path.introduction_node, IntroductionNode::NodeId(bob_id));
}
let payment_id = PaymentId([1; 32]);
payer_note_truncated: None,
},
});
- let introduction_node_id = resolve_introduction_node(alice, &reply_path);
assert_eq!(invoice_request.amount_msats(), None);
assert_ne!(invoice_request.payer_id(), david_id);
- assert_eq!(introduction_node_id, charlie_id);
- assert!(matches!(reply_path.introduction_node, IntroductionNode::DirectedShortChannelId(..)));
+ assert_eq!(reply_path.introduction_node, IntroductionNode::NodeId(charlie_id));
let onion_message = alice.onion_messenger.next_onion_message_for_peer(charlie_id).unwrap();
charlie.onion_messenger.handle_onion_message(&alice_id, &onion_message);
assert_ne!(refund.payer_id(), david_id);
assert!(!refund.paths().is_empty());
for path in refund.paths() {
- let introduction_node_id = resolve_introduction_node(alice, &path);
- assert_eq!(introduction_node_id, charlie_id);
- assert!(matches!(path.introduction_node, IntroductionNode::DirectedShortChannelId(..)));
+ assert_eq!(path.introduction_node, IntroductionNode::NodeId(charlie_id));
}
expect_recent_payment!(david, RecentPaymentDetails::AwaitingInvoice, payment_id);
let bob_id = bob.node.get_our_node_id();
let offer = alice.node
- .create_offer_builder().unwrap()
+ .create_offer_builder(None).unwrap()
.amount_msats(10_000_000)
.build().unwrap();
assert_ne!(offer.signing_pubkey(), Some(alice_id));
assert!(!offer.paths().is_empty());
for path in offer.paths() {
- let introduction_node_id = resolve_introduction_node(bob, &path);
- assert_eq!(introduction_node_id, alice_id);
- assert!(matches!(path.introduction_node, IntroductionNode::DirectedShortChannelId(..)));
+ assert_eq!(path.introduction_node, IntroductionNode::NodeId(alice_id));
}
let payment_id = PaymentId([1; 32]);
payer_note_truncated: None,
},
});
- let introduction_node_id = resolve_introduction_node(alice, &reply_path);
assert_eq!(invoice_request.amount_msats(), None);
assert_ne!(invoice_request.payer_id(), bob_id);
- assert_eq!(introduction_node_id, bob_id);
- assert!(matches!(reply_path.introduction_node, IntroductionNode::DirectedShortChannelId(..)));
+ assert_eq!(reply_path.introduction_node, IntroductionNode::NodeId(bob_id));
let onion_message = alice.onion_messenger.next_onion_message_for_peer(bob_id).unwrap();
bob.onion_messenger.handle_onion_message(&alice_id, &onion_message);
assert_ne!(refund.payer_id(), bob_id);
assert!(!refund.paths().is_empty());
for path in refund.paths() {
- let introduction_node_id = resolve_introduction_node(alice, &path);
- assert_eq!(introduction_node_id, bob_id);
- assert!(matches!(path.introduction_node, IntroductionNode::DirectedShortChannelId(..)));
+ assert_eq!(path.introduction_node, IntroductionNode::NodeId(bob_id));
}
expect_recent_payment!(bob, RecentPaymentDetails::AwaitingInvoice, payment_id);
let bob_id = bob.node.get_our_node_id();
let offer = alice.node
- .create_offer_builder().unwrap()
+ .create_offer_builder(None).unwrap()
.clear_paths()
.amount_msats(10_000_000)
.build().unwrap();
create_unannounced_chan_between_nodes_with_value(&nodes, 0, 1, 10_000_000, 1_000_000_000);
- match nodes[0].node.create_offer_builder() {
+ match nodes[0].node.create_offer_builder(None) {
Ok(_) => panic!("Expected error"),
Err(e) => assert_eq!(e, Bolt12SemanticError::MissingPaths),
}
assert!(nodes[0].node.list_recent_payments().is_empty());
}
+/// Fails creating or paying an offer when a blinded path cannot be created because no peers are
+/// connected.
+#[test]
+fn fails_creating_or_paying_for_offer_without_connected_peers() {
+	let chanmon_cfgs = create_chanmon_cfgs(6);
+	let node_cfgs = create_node_cfgs(6, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(6, &node_cfgs, &[None, None, None, None, None, None]);
+	let nodes = create_network(6, &node_cfgs, &node_chanmgrs);
+
+	create_unannounced_chan_between_nodes_with_value(&nodes, 0, 1, 10_000_000, 1_000_000_000);
+	create_unannounced_chan_between_nodes_with_value(&nodes, 2, 3, 10_000_000, 1_000_000_000);
+	create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 10_000_000, 1_000_000_000);
+	create_announced_chan_between_nodes_with_value(&nodes, 1, 4, 10_000_000, 1_000_000_000);
+	create_announced_chan_between_nodes_with_value(&nodes, 1, 5, 10_000_000, 1_000_000_000);
+	create_announced_chan_between_nodes_with_value(&nodes, 2, 4, 10_000_000, 1_000_000_000);
+	create_announced_chan_between_nodes_with_value(&nodes, 2, 5, 10_000_000, 1_000_000_000);
+
+	let (alice, bob, charlie, david) = (&nodes[0], &nodes[1], &nodes[2], &nodes[3]);
+
+	// With everyone disconnected, neither alice nor david can build a blinded path.
+	disconnect_peers(alice, &[bob, charlie, david, &nodes[4], &nodes[5]]);
+	disconnect_peers(david, &[bob, charlie, &nodes[4], &nodes[5]]);
+
+	// A short-lived expiry forces compact paths, which require a connected peer to build.
+	let absolute_expiry = alice.node.duration_since_epoch() + MAX_SHORT_LIVED_RELATIVE_EXPIRY;
+	match alice.node.create_offer_builder(Some(absolute_expiry)) {
+		Ok(_) => panic!("Expected error"),
+		Err(e) => assert_eq!(e, Bolt12SemanticError::MissingPaths),
+	}
+
+	// Reconnecting alice to bob makes offer creation possible again.
+	let mut args = ReconnectArgs::new(alice, bob);
+	args.send_channel_ready = (true, true);
+	reconnect_nodes(args);
+
+	let offer = alice.node
+		.create_offer_builder(Some(absolute_expiry)).unwrap()
+		.amount_msats(10_000_000)
+		.build().unwrap();
+
+	let payment_id = PaymentId([1; 32]);
+
+	// David is still disconnected, so he cannot build a reply path to pay the offer.
+	match david.node.pay_for_offer(&offer, None, None, None, payment_id, Retry::Attempts(0), None) {
+		Ok(_) => panic!("Expected error"),
+		Err(e) => assert_eq!(e, Bolt12SemanticError::MissingPaths),
+	}
+
+	// NOTE(review): this asserts alice's (nodes[0]) recent payments are empty; the failed
+	// payer above is david (nodes[3]) — confirm nodes[0] vs. david is intended here.
+	assert!(nodes[0].node.list_recent_payments().is_empty());
+
+	// Once david reconnects to charlie, paying the offer succeeds.
+	let mut args = ReconnectArgs::new(charlie, david);
+	args.send_channel_ready = (true, true);
+	reconnect_nodes(args);
+
+	assert!(
+		david.node.pay_for_offer(
+			&offer, None, None, None, payment_id, Retry::Attempts(0), None
+		).is_ok()
+	);
+
+	expect_recent_payment!(david, RecentPaymentDetails::AwaitingInvoice, payment_id);
+}
+
+/// Fails creating or sending an invoice for a refund when a blinded path cannot be created because
+/// no peers are connected.
+#[test]
+fn fails_creating_refund_or_sending_invoice_without_connected_peers() {
+	// Node 1 (bob) must forward to private channels for the refund flow to route.
+	let mut accept_forward_cfg = test_default_channel_config();
+	accept_forward_cfg.accept_forwards_to_priv_channels = true;
+
+	let mut features = channelmanager::provided_init_features(&accept_forward_cfg);
+	features.set_onion_messages_optional();
+	features.set_route_blinding_optional();
+
+	let chanmon_cfgs = create_chanmon_cfgs(6);
+	let node_cfgs = create_node_cfgs(6, &chanmon_cfgs);
+
+	*node_cfgs[1].override_init_features.borrow_mut() = Some(features);
+
+	let node_chanmgrs = create_node_chanmgrs(
+		6, &node_cfgs, &[None, Some(accept_forward_cfg), None, None, None, None]
+	);
+	let nodes = create_network(6, &node_cfgs, &node_chanmgrs);
+
+	create_unannounced_chan_between_nodes_with_value(&nodes, 0, 1, 10_000_000, 1_000_000_000);
+	create_unannounced_chan_between_nodes_with_value(&nodes, 2, 3, 10_000_000, 1_000_000_000);
+	create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 10_000_000, 1_000_000_000);
+	create_announced_chan_between_nodes_with_value(&nodes, 1, 4, 10_000_000, 1_000_000_000);
+	create_announced_chan_between_nodes_with_value(&nodes, 1, 5, 10_000_000, 1_000_000_000);
+	create_announced_chan_between_nodes_with_value(&nodes, 2, 4, 10_000_000, 1_000_000_000);
+	create_announced_chan_between_nodes_with_value(&nodes, 2, 5, 10_000_000, 1_000_000_000);
+
+	let (alice, bob, charlie, david) = (&nodes[0], &nodes[1], &nodes[2], &nodes[3]);
+
+	// With everyone disconnected, neither david nor alice can build a blinded path.
+	disconnect_peers(alice, &[bob, charlie, david, &nodes[4], &nodes[5]]);
+	disconnect_peers(david, &[bob, charlie, &nodes[4], &nodes[5]]);
+
+	// A short-lived expiry forces compact paths, which require a connected peer to build.
+	let absolute_expiry = david.node.duration_since_epoch() + MAX_SHORT_LIVED_RELATIVE_EXPIRY;
+	let payment_id = PaymentId([1; 32]);
+	match david.node.create_refund_builder(
+		10_000_000, absolute_expiry, payment_id, Retry::Attempts(0), None
+	) {
+		Ok(_) => panic!("Expected error"),
+		Err(e) => assert_eq!(e, Bolt12SemanticError::MissingPaths),
+	}
+
+	// Reconnecting david to charlie makes refund creation possible again.
+	let mut args = ReconnectArgs::new(charlie, david);
+	args.send_channel_ready = (true, true);
+	reconnect_nodes(args);
+
+	let refund = david.node
+		.create_refund_builder(10_000_000, absolute_expiry, payment_id, Retry::Attempts(0), None)
+		.unwrap()
+		.build().unwrap();
+
+	// Alice is still disconnected, so she cannot build paths for the refund invoice.
+	match alice.node.request_refund_payment(&refund) {
+		Ok(_) => panic!("Expected error"),
+		Err(e) => assert_eq!(e, Bolt12SemanticError::MissingPaths),
+	}
+
+	// Once alice reconnects to bob, sending the invoice for the refund succeeds.
+	let mut args = ReconnectArgs::new(alice, bob);
+	args.send_channel_ready = (true, true);
+	reconnect_nodes(args);
+
+	assert!(alice.node.request_refund_payment(&refund).is_ok());
+}
+
/// Fails creating an invoice request when the offer contains an unsupported chain.
#[test]
fn fails_creating_invoice_request_for_unsupported_chain() {
let bob = &nodes[1];
let offer = alice.node
- .create_offer_builder().unwrap()
+ .create_offer_builder(None).unwrap()
.clear_chains()
.chain(Network::Signet)
.build().unwrap();
disconnect_peers(david, &[bob, &nodes[4], &nodes[5]]);
let offer = alice.node
- .create_offer_builder().unwrap()
+ .create_offer_builder(None).unwrap()
.amount_msats(10_000_000)
.build().unwrap();
disconnect_peers(alice, &[charlie, david, &nodes[4], &nodes[5]]);
let offer = alice.node
- .create_offer_builder().unwrap()
+ .create_offer_builder(None).unwrap()
.amount_msats(10_000_000)
.build().unwrap();
disconnect_peers(david, &[bob, &nodes[4], &nodes[5]]);
let offer = alice.node
- .create_offer_builder().unwrap()
+ .create_offer_builder(None).unwrap()
.amount_msats(10_000_000)
.build().unwrap();
if expected_channel_update.is_some() {
match network_update {
Some(update) => match update {
- &NetworkUpdate::ChannelUpdateMessage { .. } => {
- if let NetworkUpdate::ChannelUpdateMessage { .. } = expected_channel_update.unwrap() {} else {
- panic!("channel_update not found!");
- }
- },
&NetworkUpdate::ChannelFailure { ref short_channel_id, ref is_permanent } => {
if let NetworkUpdate::ChannelFailure { short_channel_id: ref expected_short_channel_id, is_permanent: ref expected_is_permanent } = expected_channel_update.unwrap() {
assert!(*short_channel_id == *expected_short_channel_id);
claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage_success);
// If the hop gives fee_insufficient but enough fees were provided, then the previous hop
- // malleated the payment before forwarding, taking funds when they shouldn't have.
+ // malleated the payment before forwarding, taking funds when they shouldn't have. However,
+ // because we ignore channel update contents, we will still blame the 2nd channel.
let (_, payment_hash, payment_secret) = get_payment_preimage_hash!(nodes[2]);
- let short_channel_id = channels[0].0.contents.short_channel_id;
+ let short_channel_id = channels[1].0.contents.short_channel_id;
run_onion_failure_test("fee_insufficient", 0, &nodes, &route, &payment_hash, &payment_secret, |msg| {
msg.amount_msat -= 1;
- }, || {}, true, Some(UPDATE|12), Some(NetworkUpdate::ChannelFailure { short_channel_id, is_permanent: true}), Some(short_channel_id));
+ }, || {}, true, Some(UPDATE|12), Some(NetworkUpdate::ChannelFailure { short_channel_id, is_permanent: false}), Some(short_channel_id));
// In an earlier version, we spuriously failed to forward payments if the expected feerate
// changed between the channel open and the payment.
let session_priv = SecretKey::from_slice(&[3; 32]).unwrap();
let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route.paths[0], &session_priv).unwrap();
msg.reason = onion_utils::build_first_hop_failure_packet(onion_keys[0].shared_secret.as_ref(), UPDATE|7, &err_data);
- }, ||{}, true, Some(UPDATE|7), Some(NetworkUpdate::ChannelUpdateMessage{msg: chan_update.clone()}), Some(short_channel_id));
+ }, ||{}, true, Some(UPDATE|7),
+ Some(NetworkUpdate::ChannelFailure { short_channel_id, is_permanent: false }),
+ Some(short_channel_id));
// Check we can still handle onion failures that include channel updates without a type prefix
let err_data_without_type = chan_update.encode_with_len();
let session_priv = SecretKey::from_slice(&[3; 32]).unwrap();
let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route.paths[0], &session_priv).unwrap();
msg.reason = onion_utils::build_first_hop_failure_packet(onion_keys[0].shared_secret.as_ref(), UPDATE|7, &err_data_without_type);
- }, ||{}, true, Some(UPDATE|7), Some(NetworkUpdate::ChannelUpdateMessage{msg: chan_update}), Some(short_channel_id));
+ }, ||{}, true, Some(UPDATE|7),
+ Some(NetworkUpdate::ChannelFailure { short_channel_id, is_permanent: false }),
+ Some(short_channel_id));
let short_channel_id = channels[1].0.contents.short_channel_id;
run_onion_failure_test_with_fail_intercept("permanent_channel_failure", 100, &nodes, &route, &payment_hash, &payment_secret, |msg| {
let mut bogus_route = route.clone();
let route_len = bogus_route.paths[0].hops.len();
bogus_route.paths[0].hops[route_len-1].fee_msat = amt_to_forward;
- run_onion_failure_test("amount_below_minimum", 0, &nodes, &bogus_route, &payment_hash, &payment_secret, |_| {}, ||{}, true, Some(UPDATE|11), Some(NetworkUpdate::ChannelUpdateMessage{msg: ChannelUpdate::dummy(short_channel_id)}), Some(short_channel_id));
+ run_onion_failure_test("amount_below_minimum", 0, &nodes, &bogus_route, &payment_hash, &payment_secret, |_| {}, ||{}, true, Some(UPDATE|11),
+ Some(NetworkUpdate::ChannelFailure { short_channel_id, is_permanent: false }),
+ Some(short_channel_id));
// Clear pending payments so that the following positive test has the correct payment hash.
for node in nodes.iter() {
let preimage = send_along_route(&nodes[0], bogus_route, &[&nodes[1], &nodes[2]], amt_to_forward+1).0;
claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], preimage);
- let short_channel_id = channels[0].0.contents.short_channel_id;
+ // We ignore channel update contents in onion errors, so will blame the 2nd channel even though
+ // the first node is the one that messed up.
+ let short_channel_id = channels[1].0.contents.short_channel_id;
run_onion_failure_test("fee_insufficient", 0, &nodes, &route, &payment_hash, &payment_secret, |msg| {
msg.amount_msat -= 1;
- }, || {}, true, Some(UPDATE|12), Some(NetworkUpdate::ChannelFailure { short_channel_id, is_permanent: true}), Some(short_channel_id));
+ }, || {}, true, Some(UPDATE|12), Some(NetworkUpdate::ChannelFailure { short_channel_id, is_permanent: false}), Some(short_channel_id));
- let short_channel_id = channels[0].0.contents.short_channel_id;
+ let short_channel_id = channels[1].0.contents.short_channel_id;
run_onion_failure_test("incorrect_cltv_expiry", 0, &nodes, &route, &payment_hash, &payment_secret, |msg| {
// need to violate: cltv_expiry - cltv_expiry_delta >= outgoing_cltv_value
msg.cltv_expiry -= 1;
- }, || {}, true, Some(UPDATE|13), Some(NetworkUpdate::ChannelFailure { short_channel_id, is_permanent: true}), Some(short_channel_id));
+ }, || {}, true, Some(UPDATE|13), Some(NetworkUpdate::ChannelFailure { short_channel_id, is_permanent: false}), Some(short_channel_id));
let short_channel_id = channels[1].0.contents.short_channel_id;
run_onion_failure_test("expiry_too_soon", 0, &nodes, &route, &payment_hash, &payment_secret, |msg| {
connect_blocks(&nodes[0], height - nodes[0].best_block_info().1);
connect_blocks(&nodes[1], height - nodes[1].best_block_info().1);
connect_blocks(&nodes[2], height - nodes[2].best_block_info().1);
- }, ||{}, true, Some(UPDATE|14), Some(NetworkUpdate::ChannelUpdateMessage{msg: ChannelUpdate::dummy(short_channel_id)}), Some(short_channel_id));
+ }, ||{}, true, Some(UPDATE|14),
+ Some(NetworkUpdate::ChannelFailure { short_channel_id, is_permanent: false }),
+ Some(short_channel_id));
run_onion_failure_test("unknown_payment_hash", 2, &nodes, &route, &payment_hash, &payment_secret, |_| {}, || {
nodes[2].node.fail_htlc_backwards(&payment_hash);
// disconnect event to the channel between nodes[1] ~ nodes[2]
nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id());
nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id());
- }, true, Some(UPDATE|7), Some(NetworkUpdate::ChannelUpdateMessage{msg: ChannelUpdate::dummy(short_channel_id)}), Some(short_channel_id));
+ }, true, Some(UPDATE|7),
+ Some(NetworkUpdate::ChannelFailure { short_channel_id, is_permanent: false }),
+ Some(short_channel_id));
run_onion_failure_test("channel_disabled", 0, &nodes, &route, &payment_hash, &payment_secret, |_| {}, || {
// disconnect event to the channel between nodes[1] ~ nodes[2]
for _ in 0..DISABLE_GOSSIP_TICKS + 1 {
}
nodes[1].node.get_and_clear_pending_msg_events();
nodes[2].node.get_and_clear_pending_msg_events();
- }, true, Some(UPDATE|20), Some(NetworkUpdate::ChannelUpdateMessage{msg: ChannelUpdate::dummy(short_channel_id)}), Some(short_channel_id));
+ }, true, Some(UPDATE|20),
+ Some(NetworkUpdate::ChannelFailure { short_channel_id, is_permanent: false }),
+ Some(short_channel_id));
reconnect_nodes(ReconnectArgs::new(&nodes[1], &nodes[2]));
run_onion_failure_test("expiry_too_far", 0, &nodes, &route, &payment_hash, &payment_secret, |msg| {
// We'll be attempting to route payments using the default ChannelUpdate for channels. This will
// lead to onion failures at the first hop once we update the ChannelConfig for the
// second hop.
- let expect_onion_failure = |name: &str, error_code: u16, channel_update: &msgs::ChannelUpdate| {
+ let expect_onion_failure = |name: &str, error_code: u16| {
let short_channel_id = channel_to_update.1;
- let network_update = NetworkUpdate::ChannelUpdateMessage { msg: channel_update.clone() };
+ let network_update = NetworkUpdate::ChannelFailure { short_channel_id, is_permanent: false };
run_onion_failure_test(
name, 0, &nodes, &route, &payment_hash, &payment_secret, |_| {}, || {}, true,
Some(error_code), Some(network_update), Some(short_channel_id),
// Connect a block, which should expire the previous config, leading to a failure when
// forwarding the HTLC.
expire_prev_config();
- expect_onion_failure("fee_insufficient", UPDATE|12, &msg);
+ expect_onion_failure("fee_insufficient", UPDATE|12);
// Redundant updates should not trigger a new ChannelUpdate.
assert!(update_and_get_channel_update(&config, false, None, false).is_none());
// new ChannelUpdate.
config.forwarding_fee_base_msat = default_config.forwarding_fee_base_msat;
config.cltv_expiry_delta = u16::max_value();
- let msg = update_and_get_channel_update(&config, true, Some(&msg), true).unwrap();
- expect_onion_failure("incorrect_cltv_expiry", UPDATE|13, &msg);
+ assert!(update_and_get_channel_update(&config, true, Some(&msg), true).is_some());
+ expect_onion_failure("incorrect_cltv_expiry", UPDATE|13);
// Reset the proportional fee and increase the CLTV expiry delta which should trigger a new
// ChannelUpdate.
config.cltv_expiry_delta = default_config.cltv_expiry_delta;
config.forwarding_fee_proportional_millionths = u32::max_value();
- let msg = update_and_get_channel_update(&config, true, Some(&msg), true).unwrap();
- expect_onion_failure("fee_insufficient", UPDATE|12, &msg);
+ assert!(update_and_get_channel_update(&config, true, Some(&msg), true).is_some());
+ expect_onion_failure("fee_insufficient", UPDATE|12);
// To test persistence of the updated config, we'll re-initialize the ChannelManager.
let config_after_restart = {
err_data.extend_from_slice(&channel.1.encode());
let mut fail_conditions = PaymentFailedConditions::new()
- .blamed_scid(channel.0.contents.short_channel_id)
+ .blamed_scid(route.paths[0].hops.last().as_ref().unwrap().short_channel_id)
.blamed_chan_closed(false)
.expected_htlc_error_data(0x1000 | 7, &err_data);
- expect_payment_failed_conditions(&nodes[0], payment_hash, false, fail_conditions);
+ expect_payment_failed_conditions(&nodes[0], payment_hash, false, fail_conditions);
}
#[test]
use crate::ln::features::{ChannelFeatures, NodeFeatures};
use crate::ln::msgs;
use crate::ln::types::{PaymentHash, PaymentPreimage};
-use crate::ln::wire::Encode;
use crate::routing::gossip::NetworkUpdate;
use crate::routing::router::{Path, RouteHop, RouteParameters};
use crate::sign::NodeSigner;
{
let update_len =
u16::from_be_bytes(update_len_slice.try_into().expect("len is 2")) as usize;
- if let Some(mut update_slice) = err_packet
+ if err_packet
.failuremsg
.get(debug_field_size + 4..debug_field_size + 4 + update_len)
+ .is_some()
{
- // Historically, the BOLTs were unclear if the message type
- // bytes should be included here or not. The BOLTs have now
- // been updated to indicate that they *are* included, but many
- // nodes still send messages without the type bytes, so we
- // support both here.
- // TODO: Switch to hard require the type prefix, as the current
- // permissiveness introduces the (although small) possibility
- // that we fail to decode legitimate channel updates that
- // happen to start with ChannelUpdate::TYPE, i.e., [0x01, 0x02].
- if update_slice.len() > 2
- && update_slice[0..2] == msgs::ChannelUpdate::TYPE.to_be_bytes()
- {
- update_slice = &update_slice[2..];
- } else {
- log_trace!(logger, "Failure provided features a channel update without type prefix. Deprecated, but allowing for now.");
- }
- let update_opt = msgs::ChannelUpdate::read(&mut Cursor::new(&update_slice));
- if update_opt.is_ok() || update_slice.is_empty() {
- // if channel_update should NOT have caused the failure:
- // MAY treat the channel_update as invalid.
- let is_chan_update_invalid = match error_code & 0xff {
- 7 => false,
- 11 => {
- update_opt.is_ok()
- && amt_to_forward
- > update_opt.as_ref().unwrap().contents.htlc_minimum_msat
- },
- 12 => {
- update_opt.is_ok()
- && amt_to_forward
- .checked_mul(
- update_opt
- .as_ref()
- .unwrap()
- .contents
- .fee_proportional_millionths as u64,
- )
- .map(|prop_fee| prop_fee / 1_000_000)
- .and_then(|prop_fee| {
- prop_fee.checked_add(
- update_opt.as_ref().unwrap().contents.fee_base_msat
- as u64,
- )
- })
- .map(|fee_msats| route_hop.fee_msat >= fee_msats)
- .unwrap_or(false)
- },
- 13 => {
- update_opt.is_ok()
- && route_hop.cltv_expiry_delta as u16
- >= update_opt.as_ref().unwrap().contents.cltv_expiry_delta
- },
- 14 => false, // expiry_too_soon; always valid?
- 20 => update_opt.as_ref().unwrap().contents.flags & 2 == 0,
- _ => false, // unknown error code; take channel_update as valid
- };
- if is_chan_update_invalid {
- // This probably indicates the node which forwarded
- // to the node in question corrupted something.
- network_update = Some(NetworkUpdate::ChannelFailure {
- short_channel_id: route_hop.short_channel_id,
- is_permanent: true,
- });
- } else {
- if let Ok(chan_update) = update_opt {
- // Make sure the ChannelUpdate contains the expected
- // short channel id.
- if failing_route_hop.short_channel_id
- == chan_update.contents.short_channel_id
- {
- short_channel_id = Some(failing_route_hop.short_channel_id);
- } else {
- log_info!(logger, "Node provided a channel_update for which it was not authoritative, ignoring.");
- }
- network_update =
- Some(NetworkUpdate::ChannelUpdateMessage { msg: chan_update })
- } else {
- // The node in question intentionally encoded a 0-length channel update. This is
- // likely due to https://github.com/ElementsProject/lightning/issues/6200.
- short_channel_id = Some(failing_route_hop.short_channel_id);
- network_update = Some(NetworkUpdate::ChannelFailure {
- short_channel_id: failing_route_hop.short_channel_id,
- is_permanent: false,
- });
- }
- };
- } else {
- // If the channel_update had a non-zero length (i.e. was
- // present) but we couldn't read it, treat it as a total
- // node failure.
- log_info!(
- logger,
- "Failed to read a channel_update of len {} in an onion",
- update_slice.len()
- );
- }
+ network_update = Some(NetworkUpdate::ChannelFailure {
+ short_channel_id: failing_route_hop.short_channel_id,
+ is_permanent: false,
+ });
+ short_channel_id = Some(failing_route_hop.short_channel_id);
}
}
if network_update.is_none() {
use crate::ln::types::ChannelId;
use crate::ln::features::{InitFeatures, NodeFeatures};
use crate::ln::msgs;
-use crate::ln::msgs::{ChannelMessageHandler, LightningError, SocketAddress, OnionMessageHandler, RoutingMessageHandler};
+use crate::ln::msgs::{ChannelMessageHandler, Init, LightningError, SocketAddress, OnionMessageHandler, RoutingMessageHandler};
use crate::util::ser::{VecWriter, Writeable, Writer};
use crate::ln::peer_channel_encryptor::{PeerChannelEncryptor, NextNoiseStep, MessageBuf, MSG_BUF_ALLOC_SIZE};
use crate::ln::wire;
/// connection to the node exists, then the message is simply not sent.
fn get_and_clear_pending_msg(&self) -> Vec<(PublicKey, Self::CustomMessage)>;
+ /// Indicates a peer disconnected.
+ fn peer_disconnected(&self, their_node_id: &PublicKey);
+
+ /// Handle a peer connecting.
+ ///
+ /// May return an `Err(())` if the features the peer supports are not sufficient to communicate
+ /// with us. Implementors should be somewhat conservative about doing so, however, as other
+ /// message handlers may still wish to communicate with this peer.
+ fn peer_connected(&self, their_node_id: &PublicKey, msg: &Init, inbound: bool) -> Result<(), ()>;
+
/// Gets the node feature flags which this handler itself supports. All available handlers are
/// queried similarly and their feature flags are OR'd together to form the [`NodeFeatures`]
/// which are broadcasted in our [`NodeAnnouncement`] message.
fn get_and_clear_pending_msg(&self) -> Vec<(PublicKey, Self::CustomMessage)> { Vec::new() }
+ fn peer_disconnected(&self, _their_node_id: &PublicKey) {}
+
+ fn peer_connected(&self, _their_node_id: &PublicKey, _msg: &Init, _inbound: bool) -> Result<(), ()> { Ok(()) }
+
fn provided_node_features(&self) -> NodeFeatures { NodeFeatures::empty() }
fn provided_init_features(&self, _their_node_id: &PublicKey) -> InitFeatures {
log_debug!(logger, "Onion Message Handler decided we couldn't communicate with peer {}", log_pubkey!(their_node_id));
return Err(PeerHandleError { }.into());
}
+ if let Err(()) = self.message_handler.custom_message_handler.peer_connected(&their_node_id, &msg, peer_lock.inbound_connection) {
+ log_debug!(logger, "Custom Message Handler decided we couldn't communicate with peer {}", log_pubkey!(their_node_id));
+ return Err(PeerHandleError { }.into());
+ }
peer_lock.awaiting_pong_timer_tick_intervals = 0;
peer_lock.their_features = Some(msg.features);
log_trace!(WithContext::from(&self.logger, Some(node_id), None, None), "Disconnecting peer with id {} due to {}", node_id, reason);
self.message_handler.chan_handler.peer_disconnected(&node_id);
self.message_handler.onion_message_handler.peer_disconnected(&node_id);
+ self.message_handler.custom_message_handler.peer_disconnected(&node_id);
}
descriptor.disconnect_socket();
}
if !peer.handshake_complete() { return; }
self.message_handler.chan_handler.peer_disconnected(&node_id);
self.message_handler.onion_message_handler.peer_disconnected(&node_id);
+ self.message_handler.custom_message_handler.peer_disconnected(&node_id);
}
}
};
use crate::ln::peer_channel_encryptor::PeerChannelEncryptor;
use crate::ln::peer_handler::{CustomMessageHandler, PeerManager, MessageHandler, SocketDescriptor, IgnoringMessageHandler, filter_addresses, ErroringMessageHandler, MAX_BUFFER_DRAIN_TICK_INTERVALS_PER_PEER};
use crate::ln::{msgs, wire};
- use crate::ln::msgs::{LightningError, SocketAddress};
+ use crate::ln::msgs::{Init, LightningError, SocketAddress};
use crate::util::test_utils;
use bitcoin::Network;
fn get_and_clear_pending_msg(&self) -> Vec<(PublicKey, Self::CustomMessage)> { Vec::new() }
+
+ fn peer_disconnected(&self, _their_node_id: &PublicKey) {}
+
+ fn peer_connected(&self, _their_node_id: &PublicKey, _msg: &Init, _inbound: bool) -> Result<(), ()> { Ok(()) }
+
fn provided_node_features(&self) -> NodeFeatures { NodeFeatures::empty() }
fn provided_init_features(&self, _: &PublicKey) -> InitFeatures {
check_closed_events(&nodes[0], &close);
assert_eq!(nodes[0].node.list_channels().len(), 0);
}
+
+#[test]
+fn test_force_closure_on_low_stale_fee() {
+ // Check that we force-close channels if they have a low fee and that has gotten stale (without
+ // update).
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+ let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+ let chan_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
+
+ // Start by connecting lots of blocks to give LDK some feerate history
+ for _ in 0..super::channelmanager::FEERATE_TRACKING_BLOCKS * 2 {
+ connect_blocks(&nodes[1], 1);
+ }
+
+ // Now connect a handful of blocks with a "high" feerate
+ {
+ let mut feerate_lock = chanmon_cfgs[1].fee_estimator.sat_per_kw.lock().unwrap();
+ *feerate_lock *= 2;
+ }
+ for _ in 0..super::channelmanager::FEERATE_TRACKING_BLOCKS - 1 {
+ connect_blocks(&nodes[1], 1);
+ }
+ assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
+
+ // Now, note that one more block would cause us to force-close, it won't because we've dropped
+ // the feerate
+ {
+ let mut feerate_lock = chanmon_cfgs[1].fee_estimator.sat_per_kw.lock().unwrap();
+ *feerate_lock /= 2;
+ }
+ connect_blocks(&nodes[1], super::channelmanager::FEERATE_TRACKING_BLOCKS as u32 * 2);
+ assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
+
+ // Now, connect another FEERATE_TRACKING_BLOCKS - 1 blocks at a high feerate, note that none of
+	// these will cause a force-closure because LDK only looks at the minimum feerate over the
+ // last FEERATE_TRACKING_BLOCKS blocks.
+ {
+ let mut feerate_lock = chanmon_cfgs[1].fee_estimator.sat_per_kw.lock().unwrap();
+ *feerate_lock *= 2;
+ }
+
+ for _ in 0..super::channelmanager::FEERATE_TRACKING_BLOCKS - 1 {
+ connect_blocks(&nodes[1], 1);
+ }
+ assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
+
+ // Finally, connect one more block and check the force-close happened.
+ connect_blocks(&nodes[1], 1);
+ check_added_monitors!(nodes[1], 1);
+ check_closed_broadcast(&nodes[1], 1, true);
+ let reason = ClosureReason::PeerFeerateTooLow { peer_feerate_sat_per_kw: 253, required_feerate_sat_per_kw: 253 * 2 };
+ check_closed_events(&nodes[1], &[ExpectedCloseEvent::from_id_reason(chan_id, false, reason)]);
+}
/// # })
/// # }
/// # fn create_blinded_paths<T: secp256k1::Signing + secp256k1::Verification>(
-/// # &self, _recipient: PublicKey, _peers: Vec<ForwardNode>, _secp_ctx: &Secp256k1<T>
+/// # &self, _recipient: PublicKey, _peers: Vec<PublicKey>, _secp_ctx: &Secp256k1<T>
/// # ) -> Result<Vec<BlindedPath>, ()> {
/// # unreachable!()
/// # }
fn create_blinded_paths<
T: secp256k1::Signing + secp256k1::Verification
>(
- &self, recipient: PublicKey, peers: Vec<ForwardNode>, secp_ctx: &Secp256k1<T>,
+ &self, recipient: PublicKey, peers: Vec<PublicKey>, secp_ctx: &Secp256k1<T>,
) -> Result<Vec<BlindedPath>, ()>;
+
+ /// Creates compact [`BlindedPath`]s to the `recipient` node. The nodes in `peers` are assumed
+ /// to be direct peers with the `recipient`.
+ ///
+ /// Compact blinded paths use short channel ids instead of pubkeys for a smaller serialization,
+ /// which is beneficial when a QR code is used to transport the data. The SCID is passed using a
+ /// [`ForwardNode`] but may be `None` for graceful degradation.
+ ///
+ /// Implementations using additional intermediate nodes are responsible for using a
+ /// [`ForwardNode`] with `Some` short channel id, if possible. Similarly, implementations should
+ /// call [`BlindedPath::use_compact_introduction_node`].
+ ///
+ /// The provided implementation simply delegates to [`MessageRouter::create_blinded_paths`],
+ /// ignoring the short channel ids.
+ fn create_compact_blinded_paths<
+ T: secp256k1::Signing + secp256k1::Verification
+ >(
+ &self, recipient: PublicKey, peers: Vec<ForwardNode>, secp_ctx: &Secp256k1<T>,
+ ) -> Result<Vec<BlindedPath>, ()> {
+ let peers = peers
+ .into_iter()
+ .map(|ForwardNode { node_id, short_channel_id: _ }| node_id)
+ .collect();
+ self.create_blinded_paths(recipient, peers, secp_ctx)
+ }
}
/// A [`MessageRouter`] that can only route to a directly connected [`Destination`].
+///
+/// # Privacy
+///
+/// Creating [`BlindedPath`]s may affect privacy since, if a suitable path cannot be found, it will
+/// create a one-hop path using the recipient as the introduction node if it is an announced node.
+/// Otherwise, there is no way to find a path to the introduction node in order to send a message,
+/// and thus an `Err` is returned.
pub struct DefaultMessageRouter<G: Deref<Target=NetworkGraph<L>>, L: Deref, ES: Deref>
where
L::Target: Logger,
pub fn new(network_graph: G, entropy_source: ES) -> Self {
Self { network_graph, entropy_source }
}
-}
-
-impl<G: Deref<Target=NetworkGraph<L>>, L: Deref, ES: Deref> MessageRouter for DefaultMessageRouter<G, L, ES>
-where
- L::Target: Logger,
- ES::Target: EntropySource,
-{
- fn find_path(
- &self, sender: PublicKey, peers: Vec<PublicKey>, mut destination: Destination
- ) -> Result<OnionMessagePath, ()> {
- let network_graph = self.network_graph.deref().read_only();
- destination.resolve(&network_graph);
-
- let first_node = match destination.first_node() {
- Some(first_node) => first_node,
- None => return Err(()),
- };
-
- if peers.contains(&first_node) || sender == first_node {
- Ok(OnionMessagePath {
- intermediate_nodes: vec![], destination, first_node_addresses: None
- })
- } else {
- let node_details = network_graph
- .node(&NodeId::from_pubkey(&first_node))
- .and_then(|node_info| node_info.announcement_info.as_ref())
- .map(|announcement_info| (announcement_info.features(), announcement_info.addresses()));
-
- match node_details {
- Some((features, addresses)) if features.supports_onion_messages() && addresses.len() > 0 => {
- let first_node_addresses = Some(addresses.clone());
- Ok(OnionMessagePath {
- intermediate_nodes: vec![], destination, first_node_addresses
- })
- },
- _ => Err(()),
- }
- }
- }
- fn create_blinded_paths<
+ fn create_blinded_paths_from_iter<
+ I: Iterator<Item = ForwardNode>,
T: secp256k1::Signing + secp256k1::Verification
>(
- &self, recipient: PublicKey, peers: Vec<ForwardNode>, secp_ctx: &Secp256k1<T>,
+ &self, recipient: PublicKey, peers: I, secp_ctx: &Secp256k1<T>, compact_paths: bool
) -> Result<Vec<BlindedPath>, ()> {
// Limit the number of blinded paths that are computed.
const MAX_PATHS: usize = 3;
let is_recipient_announced =
network_graph.nodes().contains_key(&NodeId::from_pubkey(&recipient));
- let mut peer_info = peers.into_iter()
+ let mut peer_info = peers
// Limit to peers with announced channels
.filter_map(|peer|
network_graph
}
},
}?;
- for path in &mut paths {
- path.use_compact_introduction_node(&network_graph);
+
+ if compact_paths {
+ for path in &mut paths {
+ path.use_compact_introduction_node(&network_graph);
+ }
}
Ok(paths)
}
}
+impl<G: Deref<Target=NetworkGraph<L>>, L: Deref, ES: Deref> MessageRouter for DefaultMessageRouter<G, L, ES>
+where
+ L::Target: Logger,
+ ES::Target: EntropySource,
+{
+ fn find_path(
+ &self, sender: PublicKey, peers: Vec<PublicKey>, mut destination: Destination
+ ) -> Result<OnionMessagePath, ()> {
+ let network_graph = self.network_graph.deref().read_only();
+ destination.resolve(&network_graph);
+
+ let first_node = match destination.first_node() {
+ Some(first_node) => first_node,
+ None => return Err(()),
+ };
+
+ if peers.contains(&first_node) || sender == first_node {
+ Ok(OnionMessagePath {
+ intermediate_nodes: vec![], destination, first_node_addresses: None
+ })
+ } else {
+ let node_details = network_graph
+ .node(&NodeId::from_pubkey(&first_node))
+ .and_then(|node_info| node_info.announcement_info.as_ref())
+ .map(|announcement_info| (announcement_info.features(), announcement_info.addresses()));
+
+ match node_details {
+ Some((features, addresses)) if features.supports_onion_messages() && addresses.len() > 0 => {
+ let first_node_addresses = Some(addresses.clone());
+ Ok(OnionMessagePath {
+ intermediate_nodes: vec![], destination, first_node_addresses
+ })
+ },
+ _ => Err(()),
+ }
+ }
+ }
+
+ fn create_blinded_paths<
+ T: secp256k1::Signing + secp256k1::Verification
+ >(
+ &self, recipient: PublicKey, peers: Vec<PublicKey>, secp_ctx: &Secp256k1<T>,
+ ) -> Result<Vec<BlindedPath>, ()> {
+ let peers = peers
+ .into_iter()
+ .map(|node_id| ForwardNode { node_id, short_channel_id: None });
+ self.create_blinded_paths_from_iter(recipient, peers, secp_ctx, false)
+ }
+
+ fn create_compact_blinded_paths<
+ T: secp256k1::Signing + secp256k1::Verification
+ >(
+ &self, recipient: PublicKey, peers: Vec<ForwardNode>, secp_ctx: &Secp256k1<T>,
+ ) -> Result<Vec<BlindedPath>, ()> {
+ self.create_blinded_paths_from_iter(recipient, peers.into_iter(), secp_ctx, true)
+ }
+}
+
/// A path for sending an [`OnionMessage`].
#[derive(Clone)]
pub struct OnionMessagePath {
let peers = self.message_recipients.lock().unwrap()
.iter()
.filter(|(_, peer)| matches!(peer, OnionMessageRecipient::ConnectedPeer(_)))
- .map(|(node_id, _ )| ForwardNode {
- node_id: *node_id,
- short_channel_id: None,
- })
+ .map(|(node_id, _ )| *node_id)
.collect::<Vec<_>>();
self.message_router
use bitcoin::amount::Amount;
use bitcoin::blockdata::constants::ChainHash;
+use bitcoin::secp256k1;
use bitcoin::secp256k1::constants::PUBLIC_KEY_SIZE;
-use bitcoin::secp256k1::{PublicKey, Verification};
use bitcoin::secp256k1::Secp256k1;
-use bitcoin::secp256k1;
+use bitcoin::secp256k1::{PublicKey, Verification};
use bitcoin::hashes::sha256d::Hash as Sha256dHash;
use bitcoin::hashes::Hash;
use bitcoin::network::Network;
use crate::events::{MessageSendEvent, MessageSendEventsProvider};
-use crate::ln::types::ChannelId;
-use crate::ln::features::{ChannelFeatures, NodeFeatures, InitFeatures};
-use crate::ln::msgs::{DecodeError, ErrorAction, Init, LightningError, RoutingMessageHandler, SocketAddress, MAX_VALUE_MSAT};
-use crate::ln::msgs::{ChannelAnnouncement, ChannelUpdate, NodeAnnouncement, GossipTimestampFilter};
-use crate::ln::msgs::{QueryChannelRange, ReplyChannelRange, QueryShortChannelIds, ReplyShortChannelIdsEnd};
+use crate::ln::features::{ChannelFeatures, InitFeatures, NodeFeatures};
use crate::ln::msgs;
+use crate::ln::msgs::{ChannelAnnouncement, ChannelUpdate, GossipTimestampFilter, NodeAnnouncement};
+use crate::ln::msgs::{DecodeError, ErrorAction, Init, LightningError, RoutingMessageHandler, SocketAddress, MAX_VALUE_MSAT};
+use crate::ln::msgs::{QueryChannelRange, QueryShortChannelIds, ReplyChannelRange, ReplyShortChannelIdsEnd};
+use crate::ln::types::ChannelId;
use crate::routing::utxo::{self, UtxoLookup, UtxoResolver};
-use crate::util::ser::{Readable, ReadableArgs, Writeable, Writer, MaybeReadable};
-use crate::util::logger::{Logger, Level};
+use crate::util::indexed_map::{Entry as IndexedMapEntry, IndexedMap};
+use crate::util::logger::{Level, Logger};
use crate::util::scid_utils::{block_from_scid, scid_from_parts, MAX_SCID_BLOCK};
+use crate::util::ser::{MaybeReadable, Readable, ReadableArgs, RequiredWrapper, Writeable, Writer};
use crate::util::string::PrintableString;
-use crate::util::indexed_map::{IndexedMap, Entry as IndexedMapEntry};
use crate::io;
use crate::io_extras::{copy, sink};
use crate::prelude::*;
-use core::{cmp, fmt};
-use crate::sync::{RwLock, RwLockReadGuard, LockTestExt};
-#[cfg(feature = "std")]
-use core::sync::atomic::{AtomicUsize, Ordering};
use crate::sync::Mutex;
+use crate::sync::{LockTestExt, RwLock, RwLockReadGuard};
use core::ops::{Bound, Deref};
use core::str::FromStr;
+#[cfg(feature = "std")]
+use core::sync::atomic::{AtomicUsize, Ordering};
+use core::{cmp, fmt};
#[cfg(feature = "std")]
use std::time::{SystemTime, UNIX_EPOCH};
/// [BOLT #4]: https://github.com/lightning/bolts/blob/master/04-onion-routing.md
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum NetworkUpdate {
- /// An error indicating a `channel_update` messages should be applied via
- /// [`NetworkGraph::update_channel`].
- ChannelUpdateMessage {
- /// The update to apply via [`NetworkGraph::update_channel`].
- msg: ChannelUpdate,
- },
/// An error indicating that a channel failed to route a payment, which should be applied via
/// [`NetworkGraph::channel_failed_permanent`] if permanent.
ChannelFailure {
}
}
-impl_writeable_tlv_based_enum_upgradable!(NetworkUpdate,
- (0, ChannelUpdateMessage) => {
- (0, msg, required),
- },
- (2, ChannelFailure) => {
- (0, short_channel_id, required),
- (2, is_permanent, required),
- },
- (4, NodeFailure) => {
- (0, node_id, required),
- (2, is_permanent, required),
- },
-);
+impl Writeable for NetworkUpdate {
+ fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
+ match self {
+ Self::ChannelFailure { short_channel_id, is_permanent } => {
+ 2u8.write(writer)?;
+ write_tlv_fields!(writer, {
+ (0, short_channel_id, required),
+ (2, is_permanent, required),
+ });
+ },
+ Self::NodeFailure { node_id, is_permanent } => {
+ 4u8.write(writer)?;
+ write_tlv_fields!(writer, {
+ (0, node_id, required),
+ (2, is_permanent, required),
+ });
+ }
+ }
+ Ok(())
+ }
+}
+
+impl MaybeReadable for NetworkUpdate {
+ fn read<R: io::Read>(reader: &mut R) -> Result<Option<Self>, DecodeError> {
+ let id: u8 = Readable::read(reader)?;
+ match id {
+ 0 => {
+ // 0 was previously used for network updates containing a channel update, subsequently
+ // removed in LDK version 0.0.124.
+ let mut msg: RequiredWrapper<ChannelUpdate> = RequiredWrapper(None);
+ read_tlv_fields!(reader, {
+ (0, msg, required),
+ });
+ Ok(Some(Self::ChannelFailure {
+ short_channel_id: msg.0.unwrap().contents.short_channel_id,
+ is_permanent: false
+ }))
+ },
+ 2 => {
+ _init_and_read_len_prefixed_tlv_fields!(reader, {
+ (0, short_channel_id, required),
+ (2, is_permanent, required),
+ });
+ Ok(Some(Self::ChannelFailure {
+ short_channel_id: short_channel_id.0.unwrap(),
+ is_permanent: is_permanent.0.unwrap(),
+ }))
+ },
+ 4 => {
+ _init_and_read_len_prefixed_tlv_fields!(reader, {
+ (0, node_id, required),
+ (2, is_permanent, required),
+ });
+ Ok(Some(Self::NodeFailure {
+ node_id: node_id.0.unwrap(),
+ is_permanent: is_permanent.0.unwrap(),
+ }))
+ }
+ t if t % 2 == 0 => Err(DecodeError::UnknownRequiredFeature),
+ _ => Ok(None),
+ }
+ }
+}
/// Receives and validates network updates from peers,
/// stores authentic and relevant data as a network graph.
impl<L: Deref> NetworkGraph<L> where L::Target: Logger {
/// Handles any network updates originating from [`Event`]s.
- //
- /// Note that this will skip applying any [`NetworkUpdate::ChannelUpdateMessage`] to avoid
- /// leaking possibly identifying information of the sender to the public network.
///
/// [`Event`]: crate::events::Event
pub fn handle_network_update(&self, network_update: &NetworkUpdate) {
match *network_update {
- NetworkUpdate::ChannelUpdateMessage { ref msg } => {
- let short_channel_id = msg.contents.short_channel_id;
- let is_enabled = msg.contents.flags & (1 << 1) != (1 << 1);
- let status = if is_enabled { "enabled" } else { "disabled" };
- log_debug!(self.logger, "Skipping application of a channel update from a payment failure. Channel {} is {}.", short_channel_id, status);
- },
NetworkUpdate::ChannelFailure { short_channel_id, is_permanent } => {
if is_permanent {
log_debug!(self.logger, "Removing channel graph entry for {} due to a payment failure.", short_channel_id);
let short_channel_id;
{
- // Check we won't apply an update via `handle_network_update` for privacy reasons, but
- // can continue fine if we manually apply it.
+ // Check that we can manually apply a channel update.
let valid_channel_announcement = get_signed_channel_announcement(|_| {}, node_1_privkey, node_2_privkey, &secp_ctx);
short_channel_id = valid_channel_announcement.contents.short_channel_id;
let chain_source: Option<&test_utils::TestChainSource> = None;
assert!(network_graph.read_only().channels().get(&short_channel_id).is_some());
let valid_channel_update = get_signed_channel_update(|_| {}, node_1_privkey, &secp_ctx);
- assert!(network_graph.read_only().channels().get(&short_channel_id).unwrap().one_to_two.is_none());
-
- network_graph.handle_network_update(&NetworkUpdate::ChannelUpdateMessage {
- msg: valid_channel_update.clone(),
- });
assert!(network_graph.read_only().channels().get(&short_channel_id).unwrap().one_to_two.is_none());
network_graph.update_channel(&valid_channel_update).unwrap();
+ assert!(network_graph.read_only().channels().get(&short_channel_id).unwrap().one_to_two.is_some());
}
// Non-permanent failure doesn't touch the channel at all
use core::ops::Deref;
/// A [`Router`] implemented using [`find_route`].
+///
+/// # Privacy
+///
+/// Implements [`MessageRouter`] by delegating to [`DefaultMessageRouter`]. See those docs for
+/// privacy implications.
pub struct DefaultRouter<G: Deref<Target = NetworkGraph<L>> + Clone, L: Deref, ES: Deref, S: Deref, SP: Sized, Sc: ScoreLookUp<ScoreParams = SP>> where
L::Target: Logger,
S::Target: for <'a> LockableScore<'a, ScoreLookUp = Sc>,
fn create_blinded_paths<
T: secp256k1::Signing + secp256k1::Verification
> (
- &self, recipient: PublicKey, peers: Vec<message::ForwardNode>, secp_ctx: &Secp256k1<T>,
+ &self, recipient: PublicKey, peers: Vec<PublicKey>, secp_ctx: &Secp256k1<T>,
) -> Result<Vec<BlindedPath>, ()> {
self.message_router.create_blinded_paths(recipient, peers, secp_ctx)
}
+
+ fn create_compact_blinded_paths<
+ T: secp256k1::Signing + secp256k1::Verification
+ > (
+ &self, recipient: PublicKey, peers: Vec<message::ForwardNode>, secp_ctx: &Secp256k1<T>,
+ ) -> Result<Vec<BlindedPath>, ()> {
+ self.message_router.create_compact_blinded_paths(recipient, peers, secp_ctx)
+ }
}
/// A trait defining behavior for routing a payment.
fn create_blinded_paths<
T: secp256k1::Signing + secp256k1::Verification
>(
- &self, recipient: PublicKey, peers: Vec<ForwardNode>, secp_ctx: &Secp256k1<T>,
+ &self, recipient: PublicKey, peers: Vec<PublicKey>, secp_ctx: &Secp256k1<T>,
) -> Result<Vec<BlindedPath>, ()> {
self.router.create_blinded_paths(recipient, peers, secp_ctx)
}
+
+ fn create_compact_blinded_paths<
+ T: secp256k1::Signing + secp256k1::Verification
+ >(
+ &self, recipient: PublicKey, peers: Vec<ForwardNode>, secp_ctx: &Secp256k1<T>,
+ ) -> Result<Vec<BlindedPath>, ()> {
+ self.router.create_compact_blinded_paths(recipient, peers, secp_ctx)
+ }
}
impl<'a> Drop for TestRouter<'a> {
}
fn create_blinded_paths<T: secp256k1::Signing + secp256k1::Verification>(
- &self, recipient: PublicKey, peers: Vec<ForwardNode>, secp_ctx: &Secp256k1<T>,
+ &self, recipient: PublicKey, peers: Vec<PublicKey>, secp_ctx: &Secp256k1<T>,
) -> Result<Vec<BlindedPath>, ()> {
self.inner.create_blinded_paths(recipient, peers, secp_ctx)
}
+
+ fn create_compact_blinded_paths<T: secp256k1::Signing + secp256k1::Verification>(
+ &self, recipient: PublicKey, peers: Vec<ForwardNode>, secp_ctx: &Secp256k1<T>,
+ ) -> Result<Vec<BlindedPath>, ()> {
+ self.inner.create_compact_blinded_paths(recipient, peers, secp_ctx)
+ }
}
pub struct OnlyReadsKeysInterface {}
./bench/benches/bench.rs
-./fuzz/src/base32.rs
-./fuzz/src/bech32_parse.rs
-./fuzz/src/bin/base32_target.rs
-./fuzz/src/bin/bech32_parse_target.rs
-./fuzz/src/bin/bolt11_deser_target.rs
-./fuzz/src/bin/chanmon_consistency_target.rs
-./fuzz/src/bin/chanmon_deser_target.rs
-./fuzz/src/bin/fromstr_to_netaddress_target.rs
-./fuzz/src/bin/full_stack_target.rs
-./fuzz/src/bin/indexedmap_target.rs
-./fuzz/src/bin/invoice_deser_target.rs
-./fuzz/src/bin/invoice_request_deser_target.rs
-./fuzz/src/bin/msg_accept_channel_target.rs
-./fuzz/src/bin/msg_accept_channel_v2_target.rs
-./fuzz/src/bin/msg_announcement_signatures_target.rs
-./fuzz/src/bin/msg_channel_announcement_target.rs
-./fuzz/src/bin/msg_channel_details_target.rs
-./fuzz/src/bin/msg_channel_ready_target.rs
-./fuzz/src/bin/msg_channel_reestablish_target.rs
-./fuzz/src/bin/msg_channel_update_target.rs
-./fuzz/src/bin/msg_closing_signed_target.rs
-./fuzz/src/bin/msg_commitment_signed_target.rs
-./fuzz/src/bin/msg_decoded_onion_error_packet_target.rs
-./fuzz/src/bin/msg_error_message_target.rs
-./fuzz/src/bin/msg_funding_created_target.rs
-./fuzz/src/bin/msg_funding_signed_target.rs
-./fuzz/src/bin/msg_gossip_timestamp_filter_target.rs
-./fuzz/src/bin/msg_init_target.rs
-./fuzz/src/bin/msg_node_announcement_target.rs
-./fuzz/src/bin/msg_open_channel_target.rs
-./fuzz/src/bin/msg_open_channel_v2_target.rs
-./fuzz/src/bin/msg_ping_target.rs
-./fuzz/src/bin/msg_pong_target.rs
-./fuzz/src/bin/msg_query_channel_range_target.rs
-./fuzz/src/bin/msg_query_short_channel_ids_target.rs
-./fuzz/src/bin/msg_reply_channel_range_target.rs
-./fuzz/src/bin/msg_reply_short_channel_ids_end_target.rs
-./fuzz/src/bin/msg_revoke_and_ack_target.rs
-./fuzz/src/bin/msg_shutdown_target.rs
-./fuzz/src/bin/msg_splice_ack_target.rs
-./fuzz/src/bin/msg_splice_locked_target.rs
-./fuzz/src/bin/msg_splice_target.rs
-./fuzz/src/bin/msg_stfu_target.rs
-./fuzz/src/bin/msg_tx_abort_target.rs
-./fuzz/src/bin/msg_tx_ack_rbf_target.rs
-./fuzz/src/bin/msg_tx_add_input_target.rs
-./fuzz/src/bin/msg_tx_add_output_target.rs
-./fuzz/src/bin/msg_tx_complete_target.rs
-./fuzz/src/bin/msg_tx_init_rbf_target.rs
-./fuzz/src/bin/msg_tx_remove_input_target.rs
-./fuzz/src/bin/msg_tx_remove_output_target.rs
-./fuzz/src/bin/msg_tx_signatures_target.rs
-./fuzz/src/bin/msg_update_add_htlc_target.rs
-./fuzz/src/bin/msg_update_fail_htlc_target.rs
-./fuzz/src/bin/msg_update_fail_malformed_htlc_target.rs
-./fuzz/src/bin/msg_update_fee_target.rs
-./fuzz/src/bin/msg_update_fulfill_htlc_target.rs
-./fuzz/src/bin/offer_deser_target.rs
-./fuzz/src/bin/onion_hop_data_target.rs
-./fuzz/src/bin/onion_message_target.rs
-./fuzz/src/bin/peer_crypt_target.rs
-./fuzz/src/bin/process_network_graph_target.rs
-./fuzz/src/bin/refund_deser_target.rs
-./fuzz/src/bin/router_target.rs
-./fuzz/src/bin/zbase32_target.rs
./fuzz/src/chanmon_consistency.rs
-./fuzz/src/chanmon_deser.rs
-./fuzz/src/fromstr_to_netaddress.rs
./fuzz/src/full_stack.rs
-./fuzz/src/indexedmap.rs
-./fuzz/src/invoice_deser.rs
-./fuzz/src/invoice_request_deser.rs
./fuzz/src/lib.rs
-./fuzz/src/msg_targets/mod.rs
-./fuzz/src/msg_targets/msg_accept_channel.rs
-./fuzz/src/msg_targets/msg_accept_channel_v2.rs
-./fuzz/src/msg_targets/msg_announcement_signatures.rs
-./fuzz/src/msg_targets/msg_channel_announcement.rs
-./fuzz/src/msg_targets/msg_channel_details.rs
-./fuzz/src/msg_targets/msg_channel_ready.rs
-./fuzz/src/msg_targets/msg_channel_reestablish.rs
-./fuzz/src/msg_targets/msg_channel_update.rs
-./fuzz/src/msg_targets/msg_closing_signed.rs
-./fuzz/src/msg_targets/msg_commitment_signed.rs
-./fuzz/src/msg_targets/msg_decoded_onion_error_packet.rs
-./fuzz/src/msg_targets/msg_error_message.rs
-./fuzz/src/msg_targets/msg_funding_created.rs
-./fuzz/src/msg_targets/msg_funding_signed.rs
-./fuzz/src/msg_targets/msg_gossip_timestamp_filter.rs
-./fuzz/src/msg_targets/msg_init.rs
-./fuzz/src/msg_targets/msg_node_announcement.rs
-./fuzz/src/msg_targets/msg_open_channel.rs
-./fuzz/src/msg_targets/msg_open_channel_v2.rs
-./fuzz/src/msg_targets/msg_ping.rs
-./fuzz/src/msg_targets/msg_pong.rs
-./fuzz/src/msg_targets/msg_query_channel_range.rs
-./fuzz/src/msg_targets/msg_query_short_channel_ids.rs
-./fuzz/src/msg_targets/msg_reply_channel_range.rs
-./fuzz/src/msg_targets/msg_reply_short_channel_ids_end.rs
-./fuzz/src/msg_targets/msg_revoke_and_ack.rs
-./fuzz/src/msg_targets/msg_shutdown.rs
-./fuzz/src/msg_targets/msg_splice.rs
-./fuzz/src/msg_targets/msg_splice_ack.rs
-./fuzz/src/msg_targets/msg_splice_locked.rs
-./fuzz/src/msg_targets/msg_stfu.rs
-./fuzz/src/msg_targets/msg_tx_abort.rs
-./fuzz/src/msg_targets/msg_tx_ack_rbf.rs
-./fuzz/src/msg_targets/msg_tx_add_input.rs
-./fuzz/src/msg_targets/msg_tx_add_output.rs
-./fuzz/src/msg_targets/msg_tx_complete.rs
-./fuzz/src/msg_targets/msg_tx_init_rbf.rs
-./fuzz/src/msg_targets/msg_tx_remove_input.rs
-./fuzz/src/msg_targets/msg_tx_remove_output.rs
-./fuzz/src/msg_targets/msg_tx_signatures.rs
-./fuzz/src/msg_targets/msg_update_add_htlc.rs
-./fuzz/src/msg_targets/msg_update_fail_htlc.rs
-./fuzz/src/msg_targets/msg_update_fail_malformed_htlc.rs
-./fuzz/src/msg_targets/msg_update_fee.rs
-./fuzz/src/msg_targets/msg_update_fulfill_htlc.rs
-./fuzz/src/msg_targets/msg_warning_message.rs
-./fuzz/src/msg_targets/utils.rs
-./fuzz/src/offer_deser.rs
-./fuzz/src/onion_hop_data.rs
-./fuzz/src/onion_message.rs
-./fuzz/src/peer_crypt.rs
-./fuzz/src/process_network_graph.rs
-./fuzz/src/refund_deser.rs
-./fuzz/src/router.rs
-./fuzz/src/utils/mod.rs
-./fuzz/src/utils/test_logger.rs
-./fuzz/src/utils/test_persister.rs
-./fuzz/src/zbase32.rs
./lightning-background-processor/src/lib.rs
./lightning-block-sync/src/convert.rs
./lightning-block-sync/src/gossip.rs
./lightning-rapid-gossip-sync/src/error.rs
./lightning-rapid-gossip-sync/src/lib.rs
./lightning-rapid-gossip-sync/src/processing.rs
-./lightning-transaction-sync/src/common.rs
-./lightning-transaction-sync/src/electrum.rs
-./lightning-transaction-sync/src/error.rs
-./lightning-transaction-sync/src/esplora.rs
-./lightning-transaction-sync/src/lib.rs
-./lightning-transaction-sync/tests/integration_tests.rs
./lightning/src/blinded_path/message.rs
./lightning/src/blinded_path/mod.rs
./lightning/src/blinded_path/payment.rs