cd ..
- name: Run benchmarks on Rust ${{ matrix.toolchain }}
run: |
- RUSTC_BOOTSTRAP=1 cargo bench --features _bench_unstable
+ cd bench
+ RUSTFLAGS="--cfg=ldk_bench --cfg=require_route_graph_test" cargo bench
+ - name: Run benchmarks with hashbrown on Rust ${{ matrix.toolchain }}
+ run: |
+ cd bench
+ RUSTFLAGS="--cfg=ldk_bench --cfg=require_route_graph_test" cargo bench --features hashbrown
check_commits:
runs-on: ubuntu-latest
Cargo.lock
.idea
lightning/target
-lightning/ldk-net_graph-*.bin
+lightning/net_graph-*.bin
+lightning-rapid-gossip-sync/res/full_graph.lngossip
lightning-custom-message/target
+lightning-transaction-sync/target
no-std-check/target
"lightning-custom-message",
"lightning-transaction-sync",
"no-std-check",
+ "bench",
]
# Our tests do actual crypto and lots of work, the tradeoff for -O2 is well
# worth it.
opt-level = 3
lto = true
panic = "abort"
-
-[profile.bench]
-opt-level = 3
-codegen-units = 1
-lto = true
* BD6EED4D339EDBF7E7CE7F8836153082BDF676FD (Elias Rohrer)
* 6E0287D8849AE741E47CC586FD3E106A2CE099B4 (Valentine Wallace)
* 69CFEA635D0E6E6F13FD9D9136D932FCAC0305F0 (Arik Sosman)
- * A5A6868D7AA91DD00AC1A67F817FFA028EF61C94 (Antoine Riard)
--- /dev/null
+[package]
+name = "lightning-bench"
+version = "0.0.1"
+authors = ["Matt Corallo"]
+edition = "2018"
+
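+# criterion provides its own main function, so the default libtest bench harness is disabled.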
+[[bench]]
+name = "bench"
+harness = false
+
+[features]
+hashbrown = ["lightning/hashbrown"]
+
+[dependencies]
+lightning = { path = "../lightning", features = ["_test_utils", "criterion"] }
+lightning-persister = { path = "../lightning-persister", features = ["criterion"] }
+lightning-rapid-gossip-sync = { path = "../lightning-rapid-gossip-sync", features = ["criterion"] }
+criterion = { version = "0.4", default-features = false }
+
+[profile.release]
+opt-level = 3
+codegen-units = 1
+lto = true
+panic = "abort"
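+# Include debug info in benchmark builds, e.g. so profilers can symbolize samples.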
+debug = true
--- /dev/null
+This crate uses criterion to benchmark various LDK functions.
+
+It can be run as `RUSTFLAGS=--cfg=ldk_bench cargo bench`.
+
+For routing or other HashMap-bottlenecked functions, the `hashbrown` feature
+should also be benchmarked.
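+
+For example, to benchmark routing with `hashbrown` enabled:
+
+`RUSTFLAGS=--cfg=ldk_bench cargo bench --features hashbrown`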
--- /dev/null
+extern crate lightning;
+extern crate lightning_persister;
+extern crate lightning_rapid_gossip_sync;
+
+extern crate criterion;
+
+use criterion::{criterion_group, criterion_main};
+
+criterion_group!(benches,
+ // Note that benches run in the order given here. Thus, they're sorted according to how likely
+ // developers are to be working on the specific code listed, then by runtime.
+ lightning::routing::router::benches::generate_routes_with_zero_penalty_scorer,
+ lightning::routing::router::benches::generate_mpp_routes_with_zero_penalty_scorer,
+ lightning::routing::router::benches::generate_routes_with_probabilistic_scorer,
+ lightning::routing::router::benches::generate_mpp_routes_with_probabilistic_scorer,
+ lightning::routing::router::benches::generate_large_mpp_routes_with_probabilistic_scorer,
+ lightning::sign::benches::bench_get_secure_random_bytes,
+ lightning::ln::channelmanager::bench::bench_sends,
+ lightning_persister::bench::bench_sends,
+ lightning_rapid_gossip_sync::bench::bench_reading_full_graph_from_file,
+ lightning::routing::gossip::benches::read_network_graph,
+ lightning::routing::gossip::benches::write_network_graph);
+criterion_main!(benches);
//! send-side handling is correct, other peers. We consider it a failure if any action results in a
//! channel being force-closed.
-use bitcoin::TxMerkleNode;
-use bitcoin::blockdata::block::BlockHeader;
use bitcoin::blockdata::constants::genesis_block;
use bitcoin::blockdata::transaction::{Transaction, TxOut};
use bitcoin::blockdata::script::{Builder, Script};
use lightning::ln::channel::FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
use lightning::ln::msgs::{self, CommitmentUpdate, ChannelMessageHandler, DecodeError, UpdateAddHTLC, Init};
use lightning::ln::script::ShutdownScript;
+use lightning::ln::functional_test_utils::*;
use lightning::util::enforcing_trait_impls::{EnforcingSigner, EnforcementState};
use lightning::util::errors::APIError;
use lightning::util::logger::Logger;
pub struct TestBroadcaster {}
impl BroadcasterInterface for TestBroadcaster {
- fn broadcast_transaction(&self, _tx: &Transaction) { }
+ fn broadcast_transactions(&self, _txs: &[&Transaction]) { }
}
pub struct VecWriter(pub Vec<u8>);
macro_rules! confirm_txn {
($node: expr) => { {
let chain_hash = genesis_block(Network::Bitcoin).block_hash();
- let mut header = BlockHeader { version: 0x20000000, prev_blockhash: chain_hash, merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
+ let mut header = create_dummy_header(chain_hash, 42);
let txdata: Vec<_> = channel_txn.iter().enumerate().map(|(i, tx)| (i + 1, tx)).collect();
$node.transactions_confirmed(&header, &txdata, 1);
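+ // Extend the header chain to height 99 so the transactions confirmed above are buried deeply.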
for _ in 2..100 {
- header = BlockHeader { version: 0x20000000, prev_blockhash: header.block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
+ header = create_dummy_header(header.block_hash(), 42);
}
$node.best_block_updated(&header, 99);
} }
//! or payments to send/ways to handle events generated.
//! This test has been very useful, though due to its complexity good starting inputs are critical.
-use bitcoin::TxMerkleNode;
-use bitcoin::blockdata::block::BlockHeader;
use bitcoin::blockdata::constants::genesis_block;
use bitcoin::blockdata::transaction::{Transaction, TxOut};
use bitcoin::blockdata::script::{Builder, Script};
use lightning::ln::peer_handler::{MessageHandler,PeerManager,SocketDescriptor,IgnoringMessageHandler};
use lightning::ln::msgs::{self, DecodeError};
use lightning::ln::script::ShutdownScript;
+use lightning::ln::functional_test_utils::*;
use lightning::routing::gossip::{P2PGossipSync, NetworkGraph};
use lightning::routing::utxo::UtxoLookup;
use lightning::routing::router::{InFlightHtlcs, PaymentParameters, Route, RouteParameters, Router};
use lightning::util::errors::APIError;
use lightning::util::enforcing_trait_impls::{EnforcingSigner, EnforcementState};
use lightning::util::logger::Logger;
-use lightning::util::ser::{Readable, ReadableArgs, Writeable};
+use lightning::util::ser::{ReadableArgs, Writeable};
use crate::utils::test_logger;
use crate::utils::test_persister::TestPersister;
txn_broadcasted: Mutex<Vec<Transaction>>,
}
impl BroadcasterInterface for TestBroadcaster {
- fn broadcast_transaction(&self, tx: &Transaction) {
- self.txn_broadcasted.lock().unwrap().push(tx.clone());
+ fn broadcast_transactions(&self, txs: &[&Transaction]) {
+ let owned_txs: Vec<Transaction> = txs.iter().map(|tx| (*tx).clone()).collect();
+ self.txn_broadcasted.lock().unwrap().extend(owned_txs);
}
}
}
self.blocks_connected += 1;
- let header = BlockHeader { version: 0x20000000, prev_blockhash: self.header_hashes[self.height].0, merkle_root: TxMerkleNode::all_zeros(), time: self.blocks_connected, bits: 42, nonce: 42 };
+ let header = create_dummy_header(self.header_hashes[self.height].0, self.blocks_connected);
self.height += 1;
self.manager.transactions_confirmed(&header, &txdata, self.height as u32);
self.manager.best_block_updated(&header, self.height as u32);
fn disconnect_block(&mut self) {
if self.height > 0 && (self.max_height < 6 || self.height >= self.max_height - 6) {
- let header = BlockHeader { version: 0x20000000, prev_blockhash: self.header_hashes[self.height - 1].0, merkle_root: TxMerkleNode::all_zeros(), time: self.header_hashes[self.height].1, bits: 42, nonce: 42 };
+ let header = create_dummy_header(self.header_hashes[self.height - 1].0, self.header_hashes[self.height].1);
self.manager.block_disconnected(&header, self.height as u32);
self.monitor.block_disconnected(&header, self.height as u32);
self.height -= 1;
// 00 030000000000000000000000000000000000000000000000000000000000000002 03000000000000000000000000000000 - noise act two (0||pubkey||mac)
//
// 030012 - inbound read from peer id 0 of len 18
- // 000a 03000000000000000000000000000000 - message header indicating message length 10
- // 03001a - inbound read from peer id 0 of len 26
- // 0010 00022000 00022000 03000000000000000000000000000000 - init message (type 16) with static_remotekey (0x2000) and mac
+ // 0010 03000000000000000000000000000000 - message header indicating message length 16
+ // 030020 - inbound read from peer id 0 of len 32
+ // 0010 00021aaa 0008aaaaaaaaaaaa9aaa 03000000000000000000000000000000 - init message (type 16) with static_remotekey required and other bits optional and mac
//
// 030012 - inbound read from peer id 0 of len 18
- // 0141 03000000000000000000000000000000 - message header indicating message length 321
+ // 0147 03000000000000000000000000000000 - message header indicating message length 327
// 0300fe - inbound read from peer id 0 of len 254
// 0020 7500000000000000000000000000000000000000000000000000000000000000 ff4f00f805273c1b203bb5ebf8436bfde57b3be8c2f5e95d9491dbb181909679 000000000000c350 0000000000000000 0000000000000162 ffffffffffffffff 0000000000000222 0000000000000000 000000fd 0006 01e3 030000000000000000000000000000000000000000000000000000000000000001 030000000000000000000000000000000000000000000000000000000000000002 030000000000000000000000000000000000000000000000000000000000000003 030000000000000000000000000000000000000000000000000000000000000004 - beginning of open_channel message
- // 030053 - inbound read from peer id 0 of len 83
- // 030000000000000000000000000000000000000000000000000000000000000005 020900000000000000000000000000000000000000000000000000000000000000 01 03000000000000000000000000000000 - rest of open_channel and mac
+ // 030059 - inbound read from peer id 0 of len 89
+ // 030000000000000000000000000000000000000000000000000000000000000005 020900000000000000000000000000000000000000000000000000000000000000 01 0000 01021000 03000000000000000000000000000000 - rest of open_channel and mac
//
// 00fd00fd - Two feerate requests (all returning min feerate, which our open_channel also uses) (gonna be ingested by FuzzEstimator)
// - client should now respond with accept_channel (CHECK 1: type 33 to peer 03000000)
// 000302000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003000000000000000000000000000000 - inbound noise act 3
//
// 030112 - inbound read from peer id 1 of len 18
- // 000a 01000000000000000000000000000000 - message header indicating message length 10
- // 03011a - inbound read from peer id 1 of len 26
- // 0010 00022000 00022000 01000000000000000000000000000000 - init message (type 16) with static_remotekey (0x2000) and mac
+ // 0010 01000000000000000000000000000000 - message header indicating message length 16
+ // 030120 - inbound read from peer id 1 of len 32
+ // 0010 00021aaa 0008aaaaaaaaaaaa9aaa 01000000000000000000000000000000 - init message (type 16) with static_remotekey required and other bits optional and mac
//
// 05 01 030200000000000000000000000000000000000000000000000000000000000000 00c350 0003e8 - create outbound channel to peer 1 for 50k sat
// 00fd - One feerate requests (all returning min feerate) (gonna be ingested by FuzzEstimator)
//
// 030112 - inbound read from peer id 1 of len 18
- // 0110 01000000000000000000000000000000 - message header indicating message length 272
+ // 0112 01000000000000000000000000000000 - message header indicating message length 274
// 0301ff - inbound read from peer id 1 of len 255
// 0021 0000000000000000000000000000000000000000000000000000000000000e05 0000000000000162 00000000004c4b40 00000000000003e8 00000000000003e8 00000002 03f0 0005 030000000000000000000000000000000000000000000000000000000000000100 030000000000000000000000000000000000000000000000000000000000000200 030000000000000000000000000000000000000000000000000000000000000300 030000000000000000000000000000000000000000000000000000000000000400 030000000000000000000000000000000000000000000000000000000000000500 02660000000000000000000000000000 - beginning of accept_channel
- // 030121 - inbound read from peer id 1 of len 33
- // 0000000000000000000000000000000000 01000000000000000000000000000000 - rest of accept_channel and mac
+ // 030123 - inbound read from peer id 1 of len 35
+ // 0000000000000000000000000000000000 0000 01000000000000000000000000000000 - rest of accept_channel and mac
//
// 0a - create the funding transaction (client should send funding_created now)
//
// - client now fails the HTLC backwards as it was unable to extract the payment preimage (CHECK 9 duplicate and CHECK 10)
let logger = Arc::new(TrackingLogger { lines: Mutex::new(HashMap::new()) });
- super::do_test(&::hex::decode("01000000000000000000000000000000000000000000000000000000000000000000000001000300000000000000000000000000000000000000000000000000000000000000020300320003000000000000000000000000000000000000000000000000000000000000000203000000000000000000000000000000030012000a0300000000000000000000000000000003001a00100002200000022000030000000000000000000000000000000300120141030000000000000000000000000000000300fe00207500000000000000000000000000000000000000000000000000000000000000ff4f00f805273c1b203bb5ebf8436bfde57b3be8c2f5e95d9491dbb181909679000000000000c35000000000000000000000000000000162ffffffffffffffff00000000000002220000000000000000000000fd000601e3030000000000000000000000000000000000000000000000000000000000000001030000000000000000000000000000000000000000000000000000000000000002030000000000000000000000000000000000000000000000000000000000000003030000000000000000000000000000000000000000000000000000000000000004030053030000000000000000000000000000000000000000000000000000000000000005020900000000000000000000000000000000000000000000000000000000000000010300000000000000000000000000000000fd00fd0300120084030000000000000000000000000000000300940022ff4f00f805273c1b203bb5ebf8436bfde57b3be8c2f5e95d9491dbb1819096793d00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000210100000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000c005e020000000100000000000000000000000000000000000000000000000000000000000000000000000000ffffffff0150c3000000000000220020ae00000000000000000000000000000000000000000000000000000000000000000000000c00000c00000c00000c00000c00000c00000c00000c00000c00000c00000c00000c000003001200430300000000000000000000000000000003005300243d0000000000000000000000000000000000000000000000000000000000000002080000000000000000000000000000000000000000000000000000000000000003000000000000000000000000000000010301320003000000000000000000000000000000000000000000000000000000000000000703000000000000000000000000000000030142000302000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003000000000000000000000000000000030112000a0100000000000000000000000000000003011a0010000220000002200001000000000000000000000000000000050103020000000000000000000000000000000000000000000000000000000000000000c3500003e800fd0301120110010000000000000000000000000000000301ff00210000000000000000000000000000000000000000000000000000000000000e05000000000000016200000000004c4b4000000000000003e800000000000003e80000000203f00005030000000000000000000000000000000000000000000000000000000000000100030000000000000000000000000000000000000000000000000000000000000200030000000000000000000000000000000000000000000000000000000000000300030000000000000000000000000000000000000000000000000000000000000400030000000000000000000000000000000000000000000000000000000000000500026600000000000000000000000000000301210000000000000000000000000000000000010000000000000000000000000000000a03011200620100000000000000000000000000000003017200233a00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000007c0001000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000b03011200430100000000000000000000000000000003015300243a000000000000000000000000000000000000000000000000000000000000000267000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000003001205ac030000000000000000000000000000000300ff00803d0000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000003e80ff00000000000000000000000000000000000000000000000000000000000000000003f00003000000000000000000000000000000000000000000000000000000000000055511020203e80401a0060800000e00000100000a00000000000000000000000000000000000000000000000000000000000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300c1ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffab000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003001200640300000000000000000000000000000003007400843d000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000030010000000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003001200630300000000000000000000000000000003007300853d000000000000000000000000000000000000000000000000000000000000000900000000000000000000000000000000000000000000000000000000000000020b000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000007030112006401000000000000000000000000000
00003017400843a00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000006a000100000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000003011200630100000000000000000000000000000003017300853a00000000000000000000000000000000000000000000000000000000000000660000000000000000000000000000000000000000000000000000000000000002640000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000030112004a0100000000000000000000000000000003015a00823a000000000000000000000000000000000000000000000000000000000000000000000000000000ff008888888888888888888888888888888888888888888888888888888888880100000000000000000000000000000003011200640100000000000000000000000000000003017400843a000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000100000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000003011200630100000000000000000000000000000003017300853a0000000000000000000000000000000000000000000000000000000000000067000000000000000000000000000000000000000000000000000000000000000265000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000003001205ac030000000000000000000000000000000300ff00803d0000000000000000000000000000000000000000000000000000000000000000000000000000010000000000003e80ff00000000000000000000000000000000000000000000000000000000000000000003f00003000000000000000000000000000000000000000000000000000000000000055511020203e80401a0060800000e00000100000a00000000000000000000000000000000000000000000000000000000000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300c1ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffab000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003001200630300000000000000000000000000000003007300853d000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000020a000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003001200640300000000000000000000000000000003007400843d0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c3010000000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003001200630300000000000000000000000000000003007300853d000000000000000000000000000000000000000000000000000000000000000b00000000000000000000000000000000000000000000000000000000000000020d00000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000703011200640100000000000000000000000000000003017400843a000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000039000100000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000003011200630100000000000000000000000000000003017300853a00000000000000000000000000000000000000000000000000000000000000640000000000000000000000000000000000000000000000000000000000000002700000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000030112002c0100000000000000000000000000000003013c00833a00000000000000000000000000000000000000000000000000000000000000000000000000000100000100000000000000000000000000000003011200640100000000000000000000000000000003017400843a000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000039000100000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000003011200630100000000000000000000000000000003017300853a000000000000000000000000000000000000000000000000000000000000006500000000000000000000000000000000000000000000000000000000000000027100000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000703001200630300000000000000000000000000000003007300853d000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000020c000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003001200640300000000000000000000000000000003007400843d000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000032010000000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003001205ac030000000000000000000000000000000300ff00803d0000000000000000000000000000000000000000000000000000000
0000000000000000000000200000000000b0838ff00000000000000000000000000000000000000000000000000000000000000000003f0000300000000000000000000000000000000000000000000000000000000000005551202030927c00401a0060800000e00000100000a00000000000000000000000000000000000000000000000000000000000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300c1ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff53000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003001200a4030000000000000000000000000000000300b400843d00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000007501000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000006705000000000000000000000000000000000000000000000000000000000000060300000000000000000000000000000003001200630300000000000000000000000000000003007300853d000000000000000000000000000000000000000000000000000000000000000d00000000000000000000000000000000000000000000000000000000000000020f0000000000000000000000000000000000000000000000000000000000
000003000000000000000000000000000000070c007d02000000013a000000000000000000000000000000000000000000000000000000000000000000000000000000800258020000000000002200204b0000000000000000000000000000000000000000000000000000000000000014c00000000000001600142800000000000000000000000000000000000000050000200c005e0200000001730000000000000000000000000000000000000000000000000000000000000000000000000000000001a701000000000000220020b200000000000000000000000000000000000000000000000000000000000000000000000c00000c00000c00000c00000c000007").unwrap(), &(Arc::clone(&logger) as Arc<dyn Logger>));
+ super::do_test(&::hex::decode("01000000000000000000000000000000000000000000000000000000000000000000000001000300000000000000000000000000000000000000000000000000000000000000020300320003000000000000000000000000000000000000000000000000000000000000000203000000000000000000000000000000030012001003000000000000000000000000000000030020001000021aaa0008aaaaaaaaaaaa9aaa030000000000000000000000000000000300120147030000000000000000000000000000000300fe00207500000000000000000000000000000000000000000000000000000000000000ff4f00f805273c1b203bb5ebf8436bfde57b3be8c2f5e95d9491dbb181909679000000000000c35000000000000000000000000000000162ffffffffffffffff00000000000002220000000000000000000000fd000601e3030000000000000000000000000000000000000000000000000000000000000001030000000000000000000000000000000000000000000000000000000000000002030000000000000000000000000000000000000000000000000000000000000003030000000000000000000000000000000000000000000000000000000000000004030059030000000000000000000000000000000000000000000000000000000000000005020900000000000000000000000000000000000000000000000000000000000000010000010210000300000000000000000000000000000000fd00fd0300120084030000000000000000000000000000000300940022ff4f00f805273c1b203bb5ebf8436bfde57b3be8c2f5e95d9491dbb1819096793d00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000210100000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000c005e020000000100000000000000000000000000000000000000000000000000000000000000000000000000ffffffff0150c3000000000000220020ae00000000000000000000000000000000000000000000000000000000000000000000000c00000c00000c00000c00000c00000c00000c00000c00000c00000c00000c00000c000003001200430300000000000000000000000000000003005300243d0000000000000000000000000000000000000000000000000000000000000002080000000000000000000000000000000000000000000000000000000000000003000000000000000000000000000000010301320003000000000000000000000000000000000000000000000000000000000000000703000000000000000000000000000000030142000302000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003000000000000000000000000000000030112001001000000000000000000000000000000030120001000021aaa0008aaaaaaaaaaaa9aaa01000000000000000000000000000000050103020000000000000000000000000000000000000000000000000000000000000000c3500003e800fd0301120112010000000000000000000000000000000301ff00210000000000000000000000000000000000000000000000000000000000000e05000000000000016200000000004c4b4000000000000003e800000000000003e80000000203f000050300000000000000000000000000000000000000000000000000000000000001000300000000000000000000000000000000000000000000000000000000000002000300000000000000000000000000000000000000000000000000000000000003000300000000000000000000000000000000000000000000000000000000000004000300000000000000000000000000000000000000000000000000000000000005000266000000000000000000000000000003012300000000000000000000000000000000000000010000000000000000000000000000000a03011200620100000000000000000000000000000003017200233a00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000007c0001000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000b03011200430100000000000000000000000000000003015300243a000000000000000000000000000000000000000000000000000000000000000267000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000003001205ac03000000
0000000000000000000000000300ff00803d0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003e80ff00000000000000000000000000000000000000000000000000000000000000000003f00003000000000000000000000000000000000000000000000000000000000000055511020203e80401a0060800000e00000100000a00000000000000000000000000000000000000000000000000000000000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300c1ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffab000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003001200640300000000000000000000000000000003007400843d000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000030010000000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003001200630300000000000000000000000000000003007300853d000000000000000000000000000000000000000000000000000000000000000900000000000000000000000000000000000000000000000000000000000000020b00000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000
703011200640100000000000000000000000000000003017400843a00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000006a000100000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000003011200630100000000000000000000000000000003017300853a00000000000000000000000000000000000000000000000000000000000000660000000000000000000000000000000000000000000000000000000000000002640000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000030112004a0100000000000000000000000000000003015a00823a000000000000000000000000000000000000000000000000000000000000000000000000000000ff008888888888888888888888888888888888888888888888888888888888880100000000000000000000000000000003011200640100000000000000000000000000000003017400843a000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000100000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000003011200630100000000000000000000000000000003017300853a0000000000000000000000000000000000000000000000000000000000000067000000000000000000000000000000000000000000000000000000000000000265000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000003001205ac030000000000000000000000000000000300ff00803d0000000000000000000000000000000000000000000000000000000000000000000000000000010000000000003e80ff00000000000000000000000000000000000000000000000000000000000000000003f00003000000000000000000000000000000000000000000000000000000000000055511020203e80401a0060800000e00000100000a00000000000000000000000000000000000000000000000000000000000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300c1ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffab000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003001200630300000000000000000000000000000003007300853d000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000020a000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003001200640300000000000000000000000000000003007400843d0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c3010000000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003001200630300000000000000000000000000000003007300853d000000000000000000000000000000000000000000000000000000000000000b00000000000000000000000000000000000000000000000000000000000000020d00000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000703011200640100000000000000000000000000000003017400843a000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000039000100000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000003011200630100000000000000000000000000000003017300853a00000000000000000000000000000000000000000000000000000000000000640000000000000000000000000000000000000000000000000000000000000002700000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000030112002c0100000000000000000000000000000003013c00833a00000000000000000000000000000000000000000000000000000000000000000000000000000100000100000000000000000000000000000003011200640100000000000000000000000000000003017400843a000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000039000100000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000003011200630100000000000000000000000000000003017300853a000000000000000000000000000000000000000000000000000000000000006500000000000000000000000000000000000000000000000000000000000000027100000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000703001200630300000000000000000000000000000003007300853d000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000020c000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003001200640300000000000000000000000000000003007400843d000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000032010000000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003001205ac030000000000000000000000000000000300ff00803d000000000000000
00000000000000000000000000000000000000000000000000000000000000200000000000b0838ff00000000000000000000000000000000000000000000000000000000000000000003f0000300000000000000000000000000000000000000000000000000000000000005551202030927c00401a0060800000e00000100000a00000000000000000000000000000000000000000000000000000000000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300c1ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff53000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003001200a4030000000000000000000000000000000300b400843d00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000007501000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000006705000000000000000000000000000000000000000000000000000000000000060300000000000000000000000000000003001200630300000000000000000000000000000003007300853d000000000000000000000000000000000000000000000000000000000000000d00000000000000000000000000000000000000000000000000000000000000020f000000000000000000
0000000000000000000000000000000000000000000003000000000000000000000000000000070c007d02000000013a000000000000000000000000000000000000000000000000000000000000000000000000000000800258020000000000002200204b0000000000000000000000000000000000000000000000000000000000000014c00000000000001600142800000000000000000000000000000000000000050000200c005e0200000001730000000000000000000000000000000000000000000000000000000000000000000000000000000001a701000000000000220020b200000000000000000000000000000000000000000000000000000000000000000000000c00000c00000c00000c00000c000007").unwrap(), &(Arc::clone(&logger) as Arc<dyn Logger>));
let log_entries = logger.lines.lock().unwrap();
assert_eq!(log_entries.get(&("lightning::ln::peer_handler".to_string(), "Handling SendAcceptChannel event in peer_handler for node 030000000000000000000000000000000000000000000000000000000000000002 for channel ff4f00f805273c1b203bb5ebf8436bfde57b3be8c2f5e95d9491dbb181909679".to_string())), Some(&1)); // 1
use lightning::routing::gossip::{NetworkGraph, RoutingFees};
use lightning::routing::utxo::{UtxoFuture, UtxoLookup, UtxoLookupError, UtxoResult};
use lightning::routing::router::{find_route, PaymentParameters, RouteHint, RouteHintHop, RouteParameters};
-use lightning::routing::scoring::ProbabilisticScorer;
+use lightning::routing::scoring::{ProbabilisticScorer, ProbabilisticScoringFeeParameters, ProbabilisticScoringDecayParameters};
use lightning::util::config::UserConfig;
use lightning::util::ser::Readable;
}]));
}
}
- let scorer = ProbabilisticScorer::new(Default::default(), &net_graph, &logger);
+ let scorer = ProbabilisticScorer::new(ProbabilisticScoringDecayParameters::default(), &net_graph, &logger);
let random_seed_bytes: [u8; 32] = [get_slice!(1)[0]; 32];
for target in node_pks.iter() {
let final_value_msat = slice_to_be64(get_slice!(8));
};
let _ = find_route(&our_pubkey, &route_params, &net_graph,
first_hops.map(|c| c.iter().collect::<Vec<_>>()).as_ref().map(|a| a.as_slice()),
- &logger, &scorer, &random_seed_bytes);
+ &logger, &scorer, &ProbabilisticScoringFeeParameters::default(), &random_seed_bytes);
}
},
}
const NETWORK_PRUNE_TIMER: u64 = 60 * 60;
#[cfg(not(test))]
-const SCORER_PERSIST_TIMER: u64 = 30;
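+// Since the scorer is now also persisted whenever an event updates it, this timer-based persist only acts as a periodic fallback.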
+const SCORER_PERSIST_TIMER: u64 = 60 * 60;
#[cfg(test)]
const SCORER_PERSIST_TIMER: u64 = 1;
}
}
+/// Updates scorer based on event and returns whether an update occurred so we can decide whether
+/// to persist.
fn update_scorer<'a, S: 'static + Deref<Target = SC> + Send + Sync, SC: 'a + WriteableScore<'a>>(
scorer: &'a S, event: &Event
-) {
+) -> bool {
let mut score = scorer.lock();
match event {
Event::PaymentPathFailed { ref path, short_channel_id: Some(scid), .. } => {
Event::ProbeFailed { path, short_channel_id: Some(scid), .. } => {
score.probe_failed(path, *scid);
},
- _ => {},
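+ // Other events don't update the scorer, so there is nothing to persist.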
+ _ => return false,
}
+ true
}
macro_rules! define_run_body {
// Note that we want to run a graph prune once not long after startup before
// falling back to our usual hourly prunes. This avoids short-lived clients never
// pruning their network graph. We run once 60 seconds after startup before
- // continuing our normal cadence.
+ // continuing our normal cadence. For RGS, since 60 seconds is likely too long,
+ // we prune after an initial sync completes.
let prune_timer = if have_pruned { NETWORK_PRUNE_TIMER } else { FIRST_NETWORK_PRUNE_TIMER };
- if $timer_elapsed(&mut last_prune_call, prune_timer) {
+ let prune_timer_elapsed = $timer_elapsed(&mut last_prune_call, prune_timer);
+ let should_prune = match $gossip_sync {
+ GossipSync::Rapid(_) => !have_pruned || prune_timer_elapsed,
+ _ => prune_timer_elapsed,
+ };
+ if should_prune {
// The network graph must not be pruned while rapid sync completion is pending
if let Some(network_graph) = $gossip_sync.prunable_network_graph() {
#[cfg(feature = "std")] {
let network_graph = gossip_sync.network_graph();
let event_handler = &event_handler;
let scorer = &scorer;
+ let logger = &logger;
+ let persister = &persister;
async move {
if let Some(network_graph) = network_graph {
handle_network_graph_update(network_graph, &event)
}
if let Some(ref scorer) = scorer {
- update_scorer(scorer, &event);
+ if update_scorer(scorer, &event) {
+ log_trace!(logger, "Persisting scorer after update");
+ if let Err(e) = persister.persist_scorer(&scorer) {
+ log_error!(logger, "Error: Failed to persist scorer, check your disk and permissions: {}", e);
+ }
+ }
}
event_handler(event).await;
}
handle_network_graph_update(network_graph, &event)
}
if let Some(ref scorer) = scorer {
- update_scorer(scorer, &event);
+ if update_scorer(scorer, &event) {
+ log_trace!(logger, "Persisting scorer after update");
+ if let Err(e) = persister.persist_scorer(&scorer) {
+ log_error!(logger, "Error: Failed to persist scorer, check your disk and permissions: {}", e);
+ }
+ }
}
event_handler.handle_event(event);
};
#[cfg(all(feature = "std", test))]
mod tests {
- use bitcoin::blockdata::block::BlockHeader;
use bitcoin::blockdata::constants::genesis_block;
use bitcoin::blockdata::locktime::PackedLockTime;
use bitcoin::blockdata::transaction::{Transaction, TxOut};
use lightning::ln::channelmanager;
use lightning::ln::channelmanager::{BREAKDOWN_TIMEOUT, ChainParameters, MIN_CLTV_EXPIRY_DELTA, PaymentId};
use lightning::ln::features::{ChannelFeatures, NodeFeatures};
+ use lightning::ln::functional_test_utils::*;
use lightning::ln::msgs::{ChannelMessageHandler, Init};
use lightning::ln::peer_handler::{PeerManager, MessageHandler, SocketDescriptor, IgnoringMessageHandler};
use lightning::routing::gossip::{NetworkGraph, NodeId, P2PGossipSync};
use std::sync::{Arc, Mutex};
use std::sync::mpsc::SyncSender;
use std::time::Duration;
- use bitcoin::hashes::Hash;
- use bitcoin::TxMerkleNode;
use lightning_rapid_gossip_sync::RapidGossipSync;
use super::{BackgroundProcessor, GossipSync, FRESHNESS_TIMER};
fn disconnect_socket(&mut self) {}
}
- type ChannelManager = channelmanager::ChannelManager<Arc<ChainMonitor>, Arc<test_utils::TestBroadcaster>, Arc<KeysManager>, Arc<KeysManager>, Arc<KeysManager>, Arc<test_utils::TestFeeEstimator>, Arc<DefaultRouter< Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestLogger>, Arc<Mutex<TestScorer>>>>, Arc<test_utils::TestLogger>>;
+ type ChannelManager = channelmanager::ChannelManager<Arc<ChainMonitor>, Arc<test_utils::TestBroadcaster>, Arc<KeysManager>, Arc<KeysManager>, Arc<KeysManager>, Arc<test_utils::TestFeeEstimator>, Arc<DefaultRouter<Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestLogger>, Arc<Mutex<TestScorer>>, (), TestScorer>>, Arc<test_utils::TestLogger>>;
type ChainMonitor = chainmonitor::ChainMonitor<InMemorySigner, Arc<test_utils::TestChainSource>, Arc<test_utils::TestBroadcaster>, Arc<test_utils::TestFeeEstimator>, Arc<test_utils::TestLogger>, Arc<FilesystemPersister>>;
}
impl Score for TestScorer {
+ type ScoreParams = ();
fn channel_penalty_msat(
- &self, _short_channel_id: u64, _source: &NodeId, _target: &NodeId, _usage: ChannelUsage
+ &self, _short_channel_id: u64, _source: &NodeId, _target: &NodeId, _usage: ChannelUsage, _score_params: &Self::ScoreParams
) -> u64 { unimplemented!(); }
fn payment_path_failed(&mut self, actual_path: &Path, actual_short_channel_id: u64) {
fn create_nodes(num_nodes: usize, persist_dir: &str) -> (String, Vec<Node>) {
let persist_temp_path = env::temp_dir().join(persist_dir);
let persist_dir = persist_temp_path.to_string_lossy().to_string();
- let network = Network::Testnet;
+ let network = Network::Bitcoin;
let mut nodes = Vec::new();
for i in 0..num_nodes {
let tx_broadcaster = Arc::new(test_utils::TestBroadcaster::new(network));
let network_graph = Arc::new(NetworkGraph::new(network, logger.clone()));
let scorer = Arc::new(Mutex::new(TestScorer::new()));
let seed = [i as u8; 32];
- let router = Arc::new(DefaultRouter::new(network_graph.clone(), logger.clone(), seed, scorer.clone()));
- let chain_source = Arc::new(test_utils::TestChainSource::new(Network::Testnet));
+ let router = Arc::new(DefaultRouter::new(network_graph.clone(), logger.clone(), seed, scorer.clone(), ()));
+ let chain_source = Arc::new(test_utils::TestChainSource::new(Network::Bitcoin));
let persister = Arc::new(FilesystemPersister::new(format!("{}_persister_{}", &persist_dir, i)));
let now = Duration::from_secs(genesis_block.header.time as u64);
let keys_manager = Arc::new(KeysManager::new(&seed, now.as_secs(), now.subsec_nanos()));
for i in 1..=depth {
let prev_blockhash = node.best_block.block_hash();
let height = node.best_block.height() + 1;
- let header = BlockHeader { version: 0x20000000, prev_blockhash, merkle_root: TxMerkleNode::all_zeros(), time: height, bits: 42, nonce: 42 };
+ let header = create_dummy_header(prev_blockhash, height);
let txdata = vec![(0, tx)];
node.best_block = BestBlock::new(header.block_hash(), height);
match i {
if !std::thread::panicking() {
bg_processor.stop().unwrap();
}
+
+ let log_entries = nodes[0].logger.lines.lock().unwrap();
+ let expected_log = "Persisting scorer after update".to_string();
+ assert_eq!(*log_entries.get(&("lightning_background_processor".to_string(), expected_log)).unwrap(), 5);
}
#[tokio::test]
let t2 = tokio::spawn(async move {
do_test_payment_path_scoring!(nodes, receiver.recv().await);
exit_sender.send(()).unwrap();
+
+ let log_entries = nodes[0].logger.lines.lock().unwrap();
+ let expected_log = "Persisting scorer after update".to_string();
+ assert_eq!(*log_entries.get(&("lightning_background_processor".to_string(), expected_log)).unwrap(), 5);
});
let (r1, r2) = tokio::join!(t1, t2);
if let Network::Bitcoin = network {
if self.height % 2016 == 0 {
- let previous_work = previous_header.header.work();
- if work > (previous_work << 2) || work < (previous_work >> 2) {
+ let target = self.header.target();
+ let previous_target = previous_header.header.target();
+ let min_target = previous_target >> 2;
+ let max_target = previous_target << 2;
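+ // Bitcoin consensus clamps each difficulty retarget to a factor of four in either
+ // direction, so a target outside [previous >> 2, previous << 2] is invalid.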
+ if target > max_target || target < min_target {
return Err(BlockSourceError::persistent("invalid difficulty transition"))
}
} else if self.header.bits != previous_header.header.bits {
//! # use bitcoin::secp256k1::PublicKey;
//! # use lightning::io;
//! # use lightning::ln::msgs::{DecodeError, LightningError};
+//! # use lightning::ln::features::{InitFeatures, NodeFeatures};
//! use lightning::ln::peer_handler::CustomMessageHandler;
//! use lightning::ln::wire::{CustomMessageReader, self};
//! use lightning::util::ser::Writeable;
//! # fn get_and_clear_pending_msg(&self) -> Vec<(PublicKey, Self::CustomMessage)> {
//! # unimplemented!()
//! # }
+//! # fn provided_node_features(&self) -> NodeFeatures {
+//! # unimplemented!()
+//! # }
+//! # fn provided_init_features(&self, _their_node_id: &PublicKey) -> InitFeatures {
+//! # unimplemented!()
+//! # }
//! }
//!
//! #[derive(Debug)]
//! # fn get_and_clear_pending_msg(&self) -> Vec<(PublicKey, Self::CustomMessage)> {
//! # unimplemented!()
//! # }
+//! # fn provided_node_features(&self) -> NodeFeatures {
+//! # unimplemented!()
+//! # }
+//! # fn provided_init_features(&self, _their_node_id: &PublicKey) -> InitFeatures {
+//! # unimplemented!()
+//! # }
//! }
//!
//! #[derive(Debug)]
//! # fn get_and_clear_pending_msg(&self) -> Vec<(PublicKey, Self::CustomMessage)> {
//! # unimplemented!()
//! # }
+//! # fn provided_node_features(&self) -> NodeFeatures {
+//! # unimplemented!()
+//! # }
+//! # fn provided_init_features(&self, _their_node_id: &PublicKey) -> InitFeatures {
+//! # unimplemented!()
+//! # }
//! }
//!
//! # fn main() {
)*
.collect()
}
+
+ fn provided_node_features(&self) -> $crate::lightning::ln::features::NodeFeatures {
+ $crate::lightning::ln::features::NodeFeatures::empty()
+ $(
+ | self.$field.provided_node_features()
+ )*
+ }
+
+ fn provided_init_features(
+ &self, their_node_id: &$crate::bitcoin::secp256k1::PublicKey
+ ) -> $crate::lightning::ln::features::InitFeatures {
+ $crate::lightning::ln::features::InitFeatures::empty()
+ $(
+ | self.$field.provided_init_features(their_node_id)
+ )*
+ }
}
impl $crate::lightning::ln::wire::CustomMessageReader for $handler {
use secp256k1::{Message, Secp256k1};
use secp256k1::ecdsa::RecoverableSignature;
+use core::cmp::Ordering;
use core::fmt::{Display, Formatter, self};
use core::iter::FilterMap;
use core::num::ParseIntError;
/// 3. using `str::parse::<Invoice>(&str)` (see [`Invoice::from_str`])
///
/// [`Invoice::from_str`]: crate::Invoice#impl-FromStr
-#[derive(Eq, PartialEq, Debug, Clone, Hash)]
+#[derive(Eq, PartialEq, Debug, Clone, Hash, Ord, PartialOrd)]
pub struct Invoice {
signed_invoice: SignedRawInvoice,
}
///
/// This is not exported to bindings users as we don't have a good way to map the reference lifetimes making this
/// practically impossible to use safely in languages like C.
-#[derive(Eq, PartialEq, Debug, Clone)]
+#[derive(Eq, PartialEq, Debug, Clone, Ord, PartialOrd)]
pub enum InvoiceDescription<'f> {
/// Reference to the directly supplied description in the invoice
Direct(&'f Description),
///
/// # Invariants
/// The hash has to be either from the deserialized invoice or from the serialized [`RawInvoice`].
-#[derive(Eq, PartialEq, Debug, Clone, Hash)]
+#[derive(Eq, PartialEq, Debug, Clone, Hash, Ord, PartialOrd)]
pub struct SignedRawInvoice {
/// The rawInvoice that the signature belongs to
raw_invoice: RawInvoice,
/// Decoding and encoding should not lead to information loss but may lead to different hashes.
///
/// For methods without docs see the corresponding methods in [`Invoice`].
-#[derive(Eq, PartialEq, Debug, Clone, Hash)]
+#[derive(Eq, PartialEq, Debug, Clone, Hash, Ord, PartialOrd)]
pub struct RawInvoice {
/// human readable part
pub hrp: RawHrp,
/// Data of the [`RawInvoice`] that is encoded in the human readable part.
///
/// This is not exported to bindings users as we don't yet support `Option<Enum>`
-#[derive(Eq, PartialEq, Debug, Clone, Hash)]
+#[derive(Eq, PartialEq, Debug, Clone, Hash, Ord, PartialOrd)]
pub struct RawHrp {
/// The currency derived from the 3rd and 4th characters of the bech32 string
pub currency: Currency,
}
/// Data of the [`RawInvoice`] that is encoded in the data part
-#[derive(Eq, PartialEq, Debug, Clone, Hash)]
+#[derive(Eq, PartialEq, Debug, Clone, Hash, Ord, PartialOrd)]
pub struct RawDataPart {
/// generation time of the invoice
pub timestamp: PositiveTimestamp,
///
/// The Unix timestamp representing the stored time has to be positive and no greater than
/// [`MAX_TIMESTAMP`].
-#[derive(Eq, PartialEq, Debug, Clone, Hash)]
+#[derive(Eq, PartialEq, Debug, Clone, Hash, Ord, PartialOrd)]
pub struct PositiveTimestamp(Duration);
/// SI prefixes for the human readable part
-#[derive(Eq, PartialEq, Debug, Clone, Copy, Hash)]
+#[derive(Eq, PartialEq, Debug, Clone, Copy, Hash, Ord, PartialOrd)]
pub enum SiPrefix {
/// 10^-3
Milli,
}
/// Enum representing the crypto currencies (or networks) supported by this library
-#[derive(Clone, Debug, Hash, Eq, PartialEq)]
+#[derive(Clone, Debug, Hash, Eq, PartialEq, Ord, PartialOrd)]
pub enum Currency {
/// Bitcoin mainnet
Bitcoin,
/// Tagged field which may have an unknown tag
///
/// This is not exported to bindings users as we don't currently support TaggedField
-#[derive(Clone, Debug, Hash, Eq, PartialEq)]
+#[derive(Clone, Debug, Hash, Eq, PartialEq, Ord, PartialOrd)]
pub enum RawTaggedField {
/// Parsed tagged field with known tag
KnownSemantics(TaggedField),
/// This is not exported to bindings users as we don't yet support enum variants with the same name the struct contained
/// in the variant.
#[allow(missing_docs)]
-#[derive(Clone, Debug, Hash, Eq, PartialEq)]
+#[derive(Clone, Debug, Hash, Eq, PartialEq, Ord, PartialOrd)]
pub enum TaggedField {
PaymentHash(Sha256),
Description(Description),
}
/// SHA-256 hash
-#[derive(Clone, Debug, Hash, Eq, PartialEq)]
+#[derive(Clone, Debug, Hash, Eq, PartialEq, Ord, PartialOrd)]
pub struct Sha256(/// This is not exported to bindings users as the native hash types are not currently mapped
pub sha256::Hash);
///
/// # Invariants
/// The description can be at most 639 __bytes__ long
-#[derive(Clone, Debug, Hash, Eq, PartialEq)]
+#[derive(Clone, Debug, Hash, Eq, PartialEq, Ord, PartialOrd)]
pub struct Description(String);
/// Payee public key
-#[derive(Clone, Debug, Hash, Eq, PartialEq)]
+#[derive(Clone, Debug, Hash, Eq, PartialEq, Ord, PartialOrd)]
pub struct PayeePubKey(pub PublicKey);
/// Positive duration that defines when (relatively to the timestamp) in the future the invoice
/// expires
-#[derive(Clone, Debug, Hash, Eq, PartialEq)]
+#[derive(Clone, Debug, Hash, Eq, PartialEq, Ord, PartialOrd)]
pub struct ExpiryTime(Duration);
/// `min_final_cltv_expiry_delta` to use for the last HTLC in the route
-#[derive(Clone, Debug, Hash, Eq, PartialEq)]
+#[derive(Clone, Debug, Hash, Eq, PartialEq, Ord, PartialOrd)]
pub struct MinFinalCltvExpiryDelta(pub u64);
/// Fallback address in case no LN payment is possible
#[allow(missing_docs)]
-#[derive(Clone, Debug, Hash, Eq, PartialEq)]
+#[derive(Clone, Debug, Hash, Eq, PartialEq, Ord, PartialOrd)]
pub enum Fallback {
SegWitProgram {
version: WitnessVersion,
#[derive(Clone, Debug, Hash, Eq, PartialEq)]
pub struct InvoiceSignature(pub RecoverableSignature);
+impl PartialOrd for InvoiceSignature {
+	fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+		Some(self.cmp(other))
+	}
+}
+
+impl Ord for InvoiceSignature {
+ fn cmp(&self, other: &Self) -> Ordering {
+ self.0.serialize_compact().1.cmp(&other.0.serialize_compact().1)
+ }
+}
+
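// A hedged sketch of what the new `Ord`/`PartialOrd` derives enable: invoices can now
// live in ordered collections. `dedup_invoices` is a hypothetical helper, not an LDK API.
fn dedup_invoices(invoices: Vec<Invoice>) -> std::collections::BTreeSet<Invoice> {
	// `BTreeSet` requires `Ord`, which `Invoice` now derives (the manual impls above
	// cover `InvoiceSignature`, the one non-derivable field).
	invoices.into_iter().collect()
}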
/// Private routing information
///
/// # Invariants
/// The encoded route has to be <1024 5bit characters long (<=639 bytes or <=12 hops)
///
-#[derive(Clone, Debug, Hash, Eq, PartialEq)]
+#[derive(Clone, Debug, Hash, Eq, PartialEq, Ord, PartialOrd)]
pub struct PrivateRoute(RouteHint);
/// Tag constants as specified in BOLT11
}
/// An error that may occur when making a payment.
-#[derive(Clone, Debug)]
+#[derive(Clone, Debug, PartialEq, Eq)]
pub enum PaymentError {
/// An error resulting from the provided [`Invoice`] or payment hash.
Invoice(&'static str),
all-features = true
rustdoc-args = ["--cfg", "docsrs"]
-[features]
-_bench_unstable = ["lightning/_bench_unstable"]
-
[dependencies]
bitcoin = "0.29.0"
lightning = { version = "0.0.115", path = "../lightning" }
[target.'cfg(windows)'.dependencies]
winapi = { version = "0.3", features = ["winbase"] }
+[target.'cfg(ldk_bench)'.dependencies]
+criterion = { version = "0.4", optional = true, default-features = false }
+
[dev-dependencies]
lightning = { version = "0.0.115", path = "../lightning", features = ["_test_utils"] }
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
-#![cfg_attr(all(test, feature = "_bench_unstable"), feature(test))]
-#[cfg(all(test, feature = "_bench_unstable"))] extern crate test;
+#[cfg(ldk_bench)] extern crate criterion;
mod util;
continue;
}
- let txid = Txid::from_hex(filename.split_at(64).0)
+ let txid: Txid = Txid::from_hex(filename.split_at(64).0)
.map_err(|_| std::io::Error::new(
std::io::ErrorKind::InvalidData,
"Invalid tx ID in filename",
))?;
- let index = filename.split_at(65).1.parse()
+ let index: u16 = filename.split_at(65).1.parse()
.map_err(|_| std::io::Error::new(
std::io::ErrorKind::InvalidData,
"Invalid tx index in filename",
extern crate lightning;
extern crate bitcoin;
use crate::FilesystemPersister;
- use bitcoin::blockdata::block::{Block, BlockHeader};
use bitcoin::hashes::hex::FromHex;
- use bitcoin::{Txid, TxMerkleNode};
+ use bitcoin::Txid;
use lightning::chain::ChannelMonitorUpdateStatus;
use lightning::chain::chainmonitor::Persist;
use lightning::chain::channelmonitor::CLOSED_CHANNEL_UPDATE_ID;
use lightning::ln::functional_test_utils::*;
use lightning::util::test_utils;
use std::fs;
- use bitcoin::hashes::Hash;
#[cfg(target_os = "windows")]
use {
lightning::get_event_msg,
let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
assert_eq!(node_txn.len(), 1);
- let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[0].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
- connect_block(&nodes[1], &Block { header, txdata: vec![node_txn[0].clone(), node_txn[0].clone()]});
+ connect_block(&nodes[1], &create_dummy_block(nodes[0].best_block_hash(), 42, vec![node_txn[0].clone(), node_txn[0].clone()]));
check_closed_broadcast!(nodes[1], true);
check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
check_added_monitors!(nodes[1], 1);
}
}
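// Hedged sketch of the `functional_test_utils` helpers adopted above; the exact constants
// are assumptions inferred from the handwritten headers they replace.
use bitcoin::blockdata::block::{Block, BlockHeader};
use bitcoin::hash_types::{BlockHash, TxMerkleNode};
use bitcoin::hashes::Hash;
use bitcoin::Transaction;

pub fn create_dummy_header(prev_blockhash: BlockHash, time: u32) -> BlockHeader {
	BlockHeader {
		version: 0x20000000, prev_blockhash, merkle_root: TxMerkleNode::all_zeros(),
		time, bits: 42, nonce: 42,
	}
}

pub fn create_dummy_block(prev_blockhash: BlockHash, time: u32, txdata: Vec<Transaction>) -> Block {
	Block { header: create_dummy_header(prev_blockhash, time), txdata }
}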
-#[cfg(all(test, feature = "_bench_unstable"))]
+#[cfg(ldk_bench)]
+/// Benches
pub mod bench {
- use test::Bencher;
+ use criterion::Criterion;
- #[bench]
- fn bench_sends(bench: &mut Bencher) {
+ /// Bench!
+ pub fn bench_sends(bench: &mut Criterion) {
let persister_a = super::FilesystemPersister::new("bench_filesystem_persister_a".to_string());
let persister_b = super::FilesystemPersister::new("bench_filesystem_persister_b".to_string());
- lightning::ln::channelmanager::bench::bench_two_sends(bench, persister_a, persister_b);
+ lightning::ln::channelmanager::bench::bench_two_sends(
+ bench, "bench_filesystem_persisted_sends", persister_a, persister_b);
}
}
default = ["std"]
no-std = ["lightning/no-std"]
std = ["lightning/std"]
-_bench_unstable = []
[dependencies]
lightning = { version = "0.0.115", path = "../lightning", default-features = false }
bitcoin = { version = "0.29.0", default-features = false }
+[target.'cfg(ldk_bench)'.dependencies]
+criterion = { version = "0.4", optional = true, default-features = false }
+
[dev-dependencies]
lightning = { version = "0.0.115", path = "../lightning", features = ["_test_utils"] }
#![cfg_attr(all(not(feature = "std"), not(test)), no_std)]
-// Allow and import test features for benching
-#![cfg_attr(all(test, feature = "_bench_unstable"), feature(test))]
-#[cfg(all(test, feature = "_bench_unstable"))]
-extern crate test;
+#[cfg(ldk_bench)] extern crate criterion;
#[cfg(not(feature = "std"))]
extern crate alloc;
}
}
-#[cfg(all(test, feature = "_bench_unstable"))]
+#[cfg(ldk_bench)]
+/// Benches
pub mod bench {
- use test::Bencher;
-
use bitcoin::Network;
- use lightning::ln::msgs::DecodeError;
+ use criterion::Criterion;
+
+ use std::fs;
+
use lightning::routing::gossip::NetworkGraph;
use lightning::util::test_utils::TestLogger;
use crate::RapidGossipSync;
- #[bench]
- fn bench_reading_full_graph_from_file(b: &mut Bencher) {
+ /// Bench!
+ pub fn bench_reading_full_graph_from_file(b: &mut Criterion) {
let logger = TestLogger::new();
- b.iter(|| {
+ b.bench_function("read_full_graph_from_rgs", |b| b.iter(|| {
let network_graph = NetworkGraph::new(Network::Bitcoin, &logger);
let rapid_sync = RapidGossipSync::new(&network_graph, &logger);
- let sync_result = rapid_sync.sync_network_graph_with_file_path("./res/full_graph.lngossip");
- if let Err(crate::error::GraphSyncError::DecodeError(DecodeError::Io(io_error))) = &sync_result {
- let error_string = format!("Input file lightning-rapid-gossip-sync/res/full_graph.lngossip is missing! Download it from https://bitcoin.ninja/ldk-compressed_graph-bc08df7542-2022-05-05.bin\n\n{:?}", io_error);
- #[cfg(not(require_route_graph_test))]
- {
- println!("{}", error_string);
- return;
- }
- #[cfg(require_route_graph_test)]
- panic!("{}", error_string);
- }
- assert!(sync_result.is_ok())
- });
+ let mut file = match fs::read("../lightning-rapid-gossip-sync/res/full_graph.lngossip") {
+ Ok(f) => f,
+ Err(io_error) => {
+ let error_string = format!(
+ "Input file lightning-rapid-gossip-sync/res/full_graph.lngossip is missing! Download it from https://bitcoin.ninja/ldk-compressed_graph-bc08df7542-2022-05-05.bin\n\n{:?}",
+ io_error);
+ #[cfg(not(require_route_graph_test))]
+ {
+ println!("{}", error_string);
+ return;
+ }
+ #[cfg(require_route_graph_test)]
+ panic!("{}", error_string);
+ },
+ };
+ rapid_sync.update_network_graph_no_std(&mut file, None).unwrap();
+ }));
}
}
}
let chain_hash: BlockHash = Readable::read(read_cursor)?;
+ let ng_genesis_hash = self.network_graph.get_genesis_hash();
+ if chain_hash != ng_genesis_hash {
+ return Err(
+ LightningError {
+ err: "Rapid Gossip Sync data's chain hash does not match the network graph's".to_owned(),
+ action: ErrorAction::IgnoreError,
+ }.into()
+ );
+ }
+
let latest_seen_timestamp: u32 = Readable::read(read_cursor)?;
if let Some(time) = current_time_unix {
panic!("Unexpected update result: {:?}", update_result)
}
}
+
+ #[test]
+ fn fails_early_on_chain_hash_mismatch() {
+ let logger = TestLogger::new();
+ // Set to testnet so that the VALID_RGS_BINARY chain hash of mainnet does not match.
+ let network_graph = NetworkGraph::new(Network::Testnet, &logger);
+
+ assert_eq!(network_graph.read_only().channels().len(), 0);
+
+ let rapid_sync = RapidGossipSync::new(&network_graph, &logger);
+ let update_result = rapid_sync.update_network_graph_no_std(&VALID_RGS_BINARY, Some(0));
+ assert!(update_result.is_err());
+ if let Err(GraphSyncError::LightningError(err)) = update_result {
+ assert_eq!(err.err, "Rapid Gossip Sync data's chain hash does not match the network graph's");
+ } else {
+ panic!("Unexpected update result: {:?}", update_result)
+ }
+ }
}
# Allow signing of local transactions that may have been revoked or will be revoked, for functional testing (e.g. justice tx handling).
# This is unsafe to use in production because it may allow a revoked transaction to be broadcast, letting the counterparty claim our funds via a justice transaction.
unsafe_revoked_tx_signing = []
-_bench_unstable = []
# Override signing to not include randomness when generating signatures for test vectors.
_test_vectors = []
default-features = false
features = ["bitcoinconsensus", "secp-recovery"]
+[target.'cfg(ldk_bench)'.dependencies]
+criterion = { version = "0.4", optional = true, default-features = false }
+
[target.'cfg(taproot)'.dependencies]
musig2 = { git = "https://github.com/arik-so/rust-musig2", rev = "27797d7" }
/// An interface to send a transaction to the Bitcoin network.
pub trait BroadcasterInterface {
- /// Sends a transaction out to (hopefully) be mined.
- fn broadcast_transaction(&self, tx: &Transaction);
+	/// Sends a list of transactions out to (hopefully) be mined.
+	///
+	/// This only needs to handle the actual broadcasting of transactions; LDK will automatically
+	/// rebroadcast transactions that haven't made it into a block.
+	///
+	/// In some cases LDK may attempt to broadcast a transaction which double-spends another.
+	/// This is not a bug and such broadcasts can be safely ignored.
+	///
+	/// If more than one transaction is given, they should be considered a package and broadcast
+	/// together. Some of the transactions may depend on each other and some may not; be sure to
+	/// handle both cases correctly.
+	///
+	/// Bitcoin transaction packages are defined in BIP 331 and documented at
+	/// <https://github.com/bitcoin/bitcoin/blob/master/doc/policy/packages.md>.
+ fn broadcast_transactions(&self, txs: &[&Transaction]);
}
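// A minimal sketch of an implementor of the new method, assuming a hypothetical
// `RpcBroadcaster` whose `send_raw_transaction` call is not an LDK or rust-bitcoin
// API. Where the backend supports it (e.g. Bitcoin Core's `submitpackage`), the
// whole slice should be submitted as one package; otherwise, broadcasting
// sequentially in the given order keeps parents ahead of their children.
struct RpcBroadcaster { /* rpc: SomeRpcClient (hypothetical) */ }

impl BroadcasterInterface for RpcBroadcaster {
	fn broadcast_transactions(&self, txs: &[&Transaction]) {
		for tx in txs {
			// Serialize to hex for a typical `sendrawtransaction`-style RPC.
			let _hex = bitcoin::consensus::encode::serialize_hex(*tx);
			// self.rpc.send_raw_transaction(&_hex); (hypothetical call)
		}
	}
}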
/// An enum that represents the speed at which we want a transaction to confirm used for feerate
#[cfg(test)]
mod tests {
- use bitcoin::{BlockHeader, TxMerkleNode};
- use bitcoin::hashes::Hash;
use crate::{check_added_monitors, check_closed_broadcast, check_closed_event};
use crate::{expect_payment_sent, expect_payment_claimed, expect_payment_sent_without_paths, expect_payment_path_successful, get_event_msg};
use crate::{get_htlc_update_msgs, get_local_commitment_txn, get_revoke_commit_msgs, get_route_and_payment_hash, unwrap_send_err};
// Connect B's commitment transaction, but only to the ChainMonitor/ChannelMonitor. The
// channel is now closed, but the ChannelManager doesn't know that yet.
- let new_header = BlockHeader {
- version: 2, time: 0, bits: 0, nonce: 0,
- prev_blockhash: nodes[0].best_block_info().0,
- merkle_root: TxMerkleNode::all_zeros() };
+ let new_header = create_dummy_header(nodes[0].best_block_info().0, 0);
nodes[0].chain_monitor.chain_monitor.transactions_confirmed(&new_header,
&[(0, &remote_txn[0]), (1, &remote_txn[1])], nodes[0].best_block_info().1 + 1);
assert!(nodes[0].chain_monitor.release_pending_monitor_events().is_empty());
if block_timeout {
// After three blocks, pending MonitorEvents should be released either way.
- let latest_header = BlockHeader {
- version: 2, time: 0, bits: 0, nonce: 0,
- prev_blockhash: nodes[0].best_block_info().0,
- merkle_root: TxMerkleNode::all_zeros() };
+ let latest_header = create_dummy_header(nodes[0].best_block_info().0, 0);
nodes[0].chain_monitor.chain_monitor.best_block_updated(&latest_header, nodes[0].best_block_info().1 + LATENCY_GRACE_PERIOD_BLOCKS);
} else {
let persistences = chanmon_cfgs[0].persister.chain_sync_monitor_persistences.lock().unwrap().clone();
where B::Target: BroadcasterInterface,
L::Target: Logger,
{
- for tx in self.get_latest_holder_commitment_txn(logger).iter() {
+ let commit_txs = self.get_latest_holder_commitment_txn(logger);
+ let mut txs = vec![];
+ for tx in commit_txs.iter() {
log_info!(logger, "Broadcasting local {}", log_tx!(tx));
- broadcaster.broadcast_transaction(tx);
+ txs.push(tx);
}
+ broadcaster.broadcast_transactions(&txs);
self.pending_monitor_events.push(MonitorEvent::CommitmentTxConfirmed(self.funding_info.0));
}
let commitment_package = PackageTemplate::build_package(
self.funding_info.0.txid.clone(), self.funding_info.0.index as u32,
PackageSolvingData::HolderFundingOutput(funding_output),
- best_block_height, false, best_block_height,
+ best_block_height, best_block_height
);
self.onchain_tx_handler.update_claims_view_from_requests(
vec![commitment_package], best_block_height, best_block_height,
// First, process non-htlc outputs (to_holder & to_counterparty)
for (idx, outp) in tx.output.iter().enumerate() {
if outp.script_pubkey == revokeable_p2wsh {
- let revk_outp = RevokedOutput::build(per_commitment_point, self.counterparty_commitment_params.counterparty_delayed_payment_base_key, self.counterparty_commitment_params.counterparty_htlc_base_key, per_commitment_key, outp.value, self.counterparty_commitment_params.on_counterparty_tx_csv);
- let justice_package = PackageTemplate::build_package(commitment_txid, idx as u32, PackageSolvingData::RevokedOutput(revk_outp), height + self.counterparty_commitment_params.on_counterparty_tx_csv as u32, true, height);
+ let revk_outp = RevokedOutput::build(per_commitment_point, self.counterparty_commitment_params.counterparty_delayed_payment_base_key, self.counterparty_commitment_params.counterparty_htlc_base_key, per_commitment_key, outp.value, self.counterparty_commitment_params.on_counterparty_tx_csv, self.onchain_tx_handler.opt_anchors());
+ let justice_package = PackageTemplate::build_package(commitment_txid, idx as u32, PackageSolvingData::RevokedOutput(revk_outp), height + self.counterparty_commitment_params.on_counterparty_tx_csv as u32, height);
claimable_outpoints.push(justice_package);
to_counterparty_output_info =
Some((idx.try_into().expect("Txn can't have more than 2^32 outputs"), outp.value));
to_counterparty_output_info);
}
let revk_htlc_outp = RevokedHTLCOutput::build(per_commitment_point, self.counterparty_commitment_params.counterparty_delayed_payment_base_key, self.counterparty_commitment_params.counterparty_htlc_base_key, per_commitment_key, htlc.amount_msat / 1000, htlc.clone(), self.onchain_tx_handler.channel_transaction_parameters.opt_anchors.is_some());
- let justice_package = PackageTemplate::build_package(commitment_txid, transaction_output_index, PackageSolvingData::RevokedHTLCOutput(revk_htlc_outp), htlc.cltv_expiry, true, height);
+ let justice_package = PackageTemplate::build_package(commitment_txid, transaction_output_index, PackageSolvingData::RevokedHTLCOutput(revk_htlc_outp), htlc.cltv_expiry, height);
claimable_outpoints.push(justice_package);
}
}
self.counterparty_commitment_params.counterparty_htlc_base_key,
htlc.clone(), self.onchain_tx_handler.opt_anchors()))
};
- let aggregation = if !htlc.offered { false } else { true };
- let counterparty_package = PackageTemplate::build_package(commitment_txid, transaction_output_index, counterparty_htlc_outp, htlc.cltv_expiry,aggregation, 0);
+ let counterparty_package = PackageTemplate::build_package(commitment_txid, transaction_output_index, counterparty_htlc_outp, htlc.cltv_expiry, 0);
claimable_outpoints.push(counterparty_package);
}
}
let revk_outp = RevokedOutput::build(
per_commitment_point, self.counterparty_commitment_params.counterparty_delayed_payment_base_key,
self.counterparty_commitment_params.counterparty_htlc_base_key, per_commitment_key,
- tx.output[idx].value, self.counterparty_commitment_params.on_counterparty_tx_csv
+ tx.output[idx].value, self.counterparty_commitment_params.on_counterparty_tx_csv,
+ false
);
let justice_package = PackageTemplate::build_package(
htlc_txid, idx as u32, PackageSolvingData::RevokedOutput(revk_outp),
- height + self.counterparty_commitment_params.on_counterparty_tx_csv as u32, true, height
+ height + self.counterparty_commitment_params.on_counterparty_tx_csv as u32, height
);
claimable_outpoints.push(justice_package);
if outputs_to_watch.is_none() {
for &(ref htlc, _, _) in holder_tx.htlc_outputs.iter() {
if let Some(transaction_output_index) = htlc.transaction_output_index {
- let (htlc_output, aggregable) = if htlc.offered {
+ let htlc_output = if htlc.offered {
let htlc_output = HolderHTLCOutput::build_offered(
htlc.amount_msat, htlc.cltv_expiry, self.onchain_tx_handler.opt_anchors()
);
- (htlc_output, false)
+ htlc_output
} else {
let payment_preimage = if let Some(preimage) = self.payment_preimages.get(&htlc.payment_hash) {
preimage.clone()
let htlc_output = HolderHTLCOutput::build_accepted(
payment_preimage, htlc.amount_msat, self.onchain_tx_handler.opt_anchors()
);
- (htlc_output, self.onchain_tx_handler.opt_anchors())
+ htlc_output
};
let htlc_package = PackageTemplate::build_package(
holder_tx.txid, transaction_output_index,
PackageSolvingData::HolderHTLCOutput(htlc_output),
- htlc.cltv_expiry, aggregable, conf_height
+ htlc.cltv_expiry, conf_height
);
claim_requests.push(htlc_package);
}
let should_broadcast = self.should_broadcast_holder_commitment_txn(logger);
if should_broadcast {
let funding_outp = HolderFundingOutput::build(self.funding_redeemscript.clone(), self.channel_value_satoshis, self.onchain_tx_handler.opt_anchors());
- let commitment_package = PackageTemplate::build_package(self.funding_info.0.txid.clone(), self.funding_info.0.index as u32, PackageSolvingData::HolderFundingOutput(funding_outp), self.best_block.height(), false, self.best_block.height());
+ let commitment_package = PackageTemplate::build_package(self.funding_info.0.txid.clone(), self.funding_info.0.index as u32, PackageSolvingData::HolderFundingOutput(funding_outp), self.best_block.height(), self.best_block.height());
claimable_outpoints.push(commitment_package);
self.pending_monitor_events.push(MonitorEvent::CommitmentTxConfirmed(self.funding_info.0));
let commitment_tx = self.onchain_tx_handler.get_fully_signed_holder_tx(&self.funding_redeemscript);
#[cfg(test)]
mod tests {
- use bitcoin::blockdata::block::BlockHeader;
use bitcoin::blockdata::script::{Script, Builder};
use bitcoin::blockdata::opcodes;
use bitcoin::blockdata::transaction::{Transaction, TxIn, TxOut, EcdsaSighashType};
use crate::util::ser::{ReadableArgs, Writeable};
use crate::sync::{Arc, Mutex};
use crate::io;
- use bitcoin::{PackedLockTime, Sequence, TxMerkleNode, Witness};
+ use bitcoin::{PackedLockTime, Sequence, Witness};
use crate::prelude::*;
fn do_test_funding_spend_refuses_updates(use_local_txn: bool) {
// Connect a commitment transaction, but only to the ChainMonitor/ChannelMonitor. The
// channel is now closed, but the ChannelManager doesn't know that yet.
- let new_header = BlockHeader {
- version: 2, time: 0, bits: 0, nonce: 0,
- prev_blockhash: nodes[0].best_block_info().0,
- merkle_root: TxMerkleNode::all_zeros() };
+ let new_header = create_dummy_header(nodes[0].best_block_info().0, 0);
let conf_height = nodes[0].best_block_info().1 + 1;
nodes[1].chain_monitor.chain_monitor.transactions_confirmed(&new_header,
&[(0, broadcast_tx)], conf_height);
OnchainClaim::Tx(tx) => {
let log_start = if bumped_feerate { "Broadcasting RBF-bumped" } else { "Rebroadcasting" };
log_info!(logger, "{} onchain {}", log_start, log_tx!(tx));
- broadcaster.broadcast_transaction(&tx);
+ broadcaster.broadcast_transactions(&[&tx]);
},
#[cfg(anchors)]
OnchainClaim::Event(event) => {
let package_id = match claim {
OnchainClaim::Tx(tx) => {
log_info!(logger, "Broadcasting onchain {}", log_tx!(tx));
- broadcaster.broadcast_transaction(&tx);
+ broadcaster.broadcast_transactions(&[&tx]);
tx.txid().into_inner()
},
#[cfg(anchors)]
match bump_claim {
OnchainClaim::Tx(bump_tx) => {
log_info!(logger, "Broadcasting RBF-bumped onchain {}", log_tx!(bump_tx));
- broadcaster.broadcast_transaction(&bump_tx);
+ broadcaster.broadcast_transactions(&[&bump_tx]);
},
#[cfg(anchors)]
OnchainClaim::Event(claim_event) => {
match bump_claim {
OnchainClaim::Tx(bump_tx) => {
log_info!(logger, "Broadcasting onchain {}", log_tx!(bump_tx));
- broadcaster.broadcast_transaction(&bump_tx);
+ broadcaster.broadcast_transactions(&[&bump_tx]);
},
#[cfg(anchors)]
OnchainClaim::Event(claim_event) => {
weight: u64,
amount: u64,
on_counterparty_tx_csv: u16,
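+	// `Some(())` marks this as the counterparty's main balance output on an anchors
+	// channel, which must not be aggregated with other claims (see
+	// `map_output_type_flags`); `Option<()>` lets TLV presence alone encode the flag.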
+ is_counterparty_balance_on_anchors: Option<()>,
}
impl RevokedOutput {
- pub(crate) fn build(per_commitment_point: PublicKey, counterparty_delayed_payment_base_key: PublicKey, counterparty_htlc_base_key: PublicKey, per_commitment_key: SecretKey, amount: u64, on_counterparty_tx_csv: u16) -> Self {
+ pub(crate) fn build(per_commitment_point: PublicKey, counterparty_delayed_payment_base_key: PublicKey, counterparty_htlc_base_key: PublicKey, per_commitment_key: SecretKey, amount: u64, on_counterparty_tx_csv: u16, is_counterparty_balance_on_anchors: bool) -> Self {
RevokedOutput {
per_commitment_point,
counterparty_delayed_payment_base_key,
per_commitment_key,
weight: WEIGHT_REVOKED_OUTPUT,
amount,
- on_counterparty_tx_csv
+ on_counterparty_tx_csv,
+ is_counterparty_balance_on_anchors: if is_counterparty_balance_on_anchors { Some(()) } else { None }
}
}
}
(8, weight, required),
(10, amount, required),
(12, on_counterparty_tx_csv, required),
+ (14, is_counterparty_balance_on_anchors, option)
});
/// A struct to describe a revoked offered output and corresponding information to generate a
};
absolute_timelock
}
+
+ fn map_output_type_flags(&self) -> (PackageMalleability, bool) {
+ // Post-anchor, aggregation of outputs of different types is unsafe. See https://github.com/lightning/bolts/pull/803.
+ let (malleability, aggregable) = match self {
+ PackageSolvingData::RevokedOutput(RevokedOutput { is_counterparty_balance_on_anchors: Some(()), .. }) => { (PackageMalleability::Malleable, false) },
+ PackageSolvingData::RevokedOutput(RevokedOutput { is_counterparty_balance_on_anchors: None, .. }) => { (PackageMalleability::Malleable, true) },
+ PackageSolvingData::RevokedHTLCOutput(..) => { (PackageMalleability::Malleable, true) },
+ PackageSolvingData::CounterpartyOfferedHTLCOutput(..) => { (PackageMalleability::Malleable, true) },
+ PackageSolvingData::CounterpartyReceivedHTLCOutput(..) => { (PackageMalleability::Malleable, false) },
+ PackageSolvingData::HolderHTLCOutput(ref outp) => if outp.opt_anchors() {
+ (PackageMalleability::Malleable, outp.preimage.is_some())
+ } else {
+ (PackageMalleability::Untractable, false)
+ },
+ PackageSolvingData::HolderFundingOutput(..) => { (PackageMalleability::Untractable, false) },
+ };
+ (malleability, aggregable)
+ }
}
impl_writeable_tlv_based_enum!(PackageSolvingData, ;
);
/// A malleable package might be aggregated with other packages to save on fees.
-/// A untractable package has been counter-signed and aggregable will break cached counterparty
-/// signatures.
+/// An untractable package has been counter-signed; aggregating it would break the cached counterparty signatures.
#[derive(Clone, PartialEq, Eq)]
pub(crate) enum PackageMalleability {
Malleable,
}).is_some()
}
- pub (crate) fn build_package(txid: Txid, vout: u32, input_solving_data: PackageSolvingData, soonest_conf_deadline: u32, aggregable: bool, height_original: u32) -> Self {
- let malleability = match input_solving_data {
- PackageSolvingData::RevokedOutput(..) => PackageMalleability::Malleable,
- PackageSolvingData::RevokedHTLCOutput(..) => PackageMalleability::Malleable,
- PackageSolvingData::CounterpartyOfferedHTLCOutput(..) => PackageMalleability::Malleable,
- PackageSolvingData::CounterpartyReceivedHTLCOutput(..) => PackageMalleability::Malleable,
- PackageSolvingData::HolderHTLCOutput(ref outp) => if outp.opt_anchors() {
- PackageMalleability::Malleable
- } else {
- PackageMalleability::Untractable
- },
- PackageSolvingData::HolderFundingOutput(..) => PackageMalleability::Untractable,
- };
+ pub (crate) fn build_package(txid: Txid, vout: u32, input_solving_data: PackageSolvingData, soonest_conf_deadline: u32, height_original: u32) -> Self {
+ let (malleability, aggregable) = PackageSolvingData::map_output_type_flags(&input_solving_data);
let mut inputs = Vec::with_capacity(1);
inputs.push((BitcoinOutPoint { txid, vout }, input_solving_data));
PackageTemplate {
inputs.push((outpoint, rev_outp));
}
let (malleability, aggregable) = if let Some((_, lead_input)) = inputs.first() {
- match lead_input {
- PackageSolvingData::RevokedOutput(..) => { (PackageMalleability::Malleable, true) },
- PackageSolvingData::RevokedHTLCOutput(..) => { (PackageMalleability::Malleable, true) },
- PackageSolvingData::CounterpartyOfferedHTLCOutput(..) => { (PackageMalleability::Malleable, true) },
- PackageSolvingData::CounterpartyReceivedHTLCOutput(..) => { (PackageMalleability::Malleable, false) },
- PackageSolvingData::HolderHTLCOutput(ref outp) => if outp.opt_anchors() {
- (PackageMalleability::Malleable, outp.preimage.is_some())
- } else {
- (PackageMalleability::Untractable, false)
- },
- PackageSolvingData::HolderFundingOutput(..) => { (PackageMalleability::Untractable, false) },
- }
+ PackageSolvingData::map_output_type_flags(&lead_input)
} else { return Err(DecodeError::InvalidValue); };
let mut soonest_conf_deadline = 0;
let mut feerate_previous = 0;
use bitcoin::secp256k1::Secp256k1;
macro_rules! dumb_revk_output {
- ($secp_ctx: expr) => {
+ ($secp_ctx: expr, $is_counterparty_balance_on_anchors: expr) => {
{
let dumb_scalar = SecretKey::from_slice(&hex::decode("0101010101010101010101010101010101010101010101010101010101010101").unwrap()[..]).unwrap();
let dumb_point = PublicKey::from_secret_key(&$secp_ctx, &dumb_scalar);
- PackageSolvingData::RevokedOutput(RevokedOutput::build(dumb_point, dumb_point, dumb_point, dumb_scalar, 0, 0))
+ PackageSolvingData::RevokedOutput(RevokedOutput::build(dumb_point, dumb_point, dumb_point, dumb_scalar, 0, 0, $is_counterparty_balance_on_anchors))
}
}
}
fn test_package_differing_heights() {
let txid = Txid::from_hex("c2d4449afa8d26140898dd54d3390b057ba2a5afcf03ba29d7dc0d8b9ffe966e").unwrap();
let secp_ctx = Secp256k1::new();
- let revk_outp = dumb_revk_output!(secp_ctx);
+ let revk_outp = dumb_revk_output!(secp_ctx, false);
- let mut package_one_hundred = PackageTemplate::build_package(txid, 0, revk_outp.clone(), 1000, true, 100);
- let package_two_hundred = PackageTemplate::build_package(txid, 1, revk_outp.clone(), 1000, true, 200);
+ let mut package_one_hundred = PackageTemplate::build_package(txid, 0, revk_outp.clone(), 1000, 100);
+ let package_two_hundred = PackageTemplate::build_package(txid, 1, revk_outp.clone(), 1000, 200);
package_one_hundred.merge_package(package_two_hundred);
}
fn test_package_untractable_merge_to() {
let txid = Txid::from_hex("c2d4449afa8d26140898dd54d3390b057ba2a5afcf03ba29d7dc0d8b9ffe966e").unwrap();
let secp_ctx = Secp256k1::new();
- let revk_outp = dumb_revk_output!(secp_ctx);
+ let revk_outp = dumb_revk_output!(secp_ctx, false);
let htlc_outp = dumb_htlc_output!();
- let mut untractable_package = PackageTemplate::build_package(txid, 0, revk_outp.clone(), 1000, true, 100);
- let malleable_package = PackageTemplate::build_package(txid, 1, htlc_outp.clone(), 1000, true, 100);
+ let mut untractable_package = PackageTemplate::build_package(txid, 0, revk_outp.clone(), 1000, 100);
+ let malleable_package = PackageTemplate::build_package(txid, 1, htlc_outp.clone(), 1000, 100);
untractable_package.merge_package(malleable_package);
}
let txid = Txid::from_hex("c2d4449afa8d26140898dd54d3390b057ba2a5afcf03ba29d7dc0d8b9ffe966e").unwrap();
let secp_ctx = Secp256k1::new();
let htlc_outp = dumb_htlc_output!();
- let revk_outp = dumb_revk_output!(secp_ctx);
+ let revk_outp = dumb_revk_output!(secp_ctx, false);
- let mut malleable_package = PackageTemplate::build_package(txid, 0, htlc_outp.clone(), 1000, true, 100);
- let untractable_package = PackageTemplate::build_package(txid, 1, revk_outp.clone(), 1000, true, 100);
+ let mut malleable_package = PackageTemplate::build_package(txid, 0, htlc_outp.clone(), 1000, 100);
+ let untractable_package = PackageTemplate::build_package(txid, 1, revk_outp.clone(), 1000, 100);
malleable_package.merge_package(untractable_package);
}
fn test_package_noaggregation_to() {
let txid = Txid::from_hex("c2d4449afa8d26140898dd54d3390b057ba2a5afcf03ba29d7dc0d8b9ffe966e").unwrap();
let secp_ctx = Secp256k1::new();
- let revk_outp = dumb_revk_output!(secp_ctx);
+ let revk_outp = dumb_revk_output!(secp_ctx, false);
+ let revk_outp_counterparty_balance = dumb_revk_output!(secp_ctx, true);
- let mut noaggregation_package = PackageTemplate::build_package(txid, 0, revk_outp.clone(), 1000, false, 100);
- let aggregation_package = PackageTemplate::build_package(txid, 1, revk_outp.clone(), 1000, true, 100);
+ let mut noaggregation_package = PackageTemplate::build_package(txid, 0, revk_outp_counterparty_balance.clone(), 1000, 100);
+ let aggregation_package = PackageTemplate::build_package(txid, 1, revk_outp.clone(), 1000, 100);
noaggregation_package.merge_package(aggregation_package);
}
fn test_package_noaggregation_from() {
let txid = Txid::from_hex("c2d4449afa8d26140898dd54d3390b057ba2a5afcf03ba29d7dc0d8b9ffe966e").unwrap();
let secp_ctx = Secp256k1::new();
- let revk_outp = dumb_revk_output!(secp_ctx);
+ let revk_outp = dumb_revk_output!(secp_ctx, false);
+ let revk_outp_counterparty_balance = dumb_revk_output!(secp_ctx, true);
- let mut aggregation_package = PackageTemplate::build_package(txid, 0, revk_outp.clone(), 1000, true, 100);
- let noaggregation_package = PackageTemplate::build_package(txid, 1, revk_outp.clone(), 1000, false, 100);
+ let mut aggregation_package = PackageTemplate::build_package(txid, 0, revk_outp.clone(), 1000, 100);
+ let noaggregation_package = PackageTemplate::build_package(txid, 1, revk_outp_counterparty_balance.clone(), 1000, 100);
aggregation_package.merge_package(noaggregation_package);
}
fn test_package_empty() {
let txid = Txid::from_hex("c2d4449afa8d26140898dd54d3390b057ba2a5afcf03ba29d7dc0d8b9ffe966e").unwrap();
let secp_ctx = Secp256k1::new();
- let revk_outp = dumb_revk_output!(secp_ctx);
+ let revk_outp = dumb_revk_output!(secp_ctx, false);
- let mut empty_package = PackageTemplate::build_package(txid, 0, revk_outp.clone(), 1000, true, 100);
+ let mut empty_package = PackageTemplate::build_package(txid, 0, revk_outp.clone(), 1000, 100);
empty_package.inputs = vec![];
- let package = PackageTemplate::build_package(txid, 1, revk_outp.clone(), 1000, true, 100);
+ let package = PackageTemplate::build_package(txid, 1, revk_outp.clone(), 1000, 100);
empty_package.merge_package(package);
}
fn test_package_differing_categories() {
let txid = Txid::from_hex("c2d4449afa8d26140898dd54d3390b057ba2a5afcf03ba29d7dc0d8b9ffe966e").unwrap();
let secp_ctx = Secp256k1::new();
- let revk_outp = dumb_revk_output!(secp_ctx);
+ let revk_outp = dumb_revk_output!(secp_ctx, false);
let counterparty_outp = dumb_counterparty_output!(secp_ctx, 0, false);
- let mut revoked_package = PackageTemplate::build_package(txid, 0, revk_outp, 1000, true, 100);
- let counterparty_package = PackageTemplate::build_package(txid, 1, counterparty_outp, 1000, true, 100);
+ let mut revoked_package = PackageTemplate::build_package(txid, 0, revk_outp, 1000, 100);
+ let counterparty_package = PackageTemplate::build_package(txid, 1, counterparty_outp, 1000, 100);
revoked_package.merge_package(counterparty_package);
}
fn test_package_split_malleable() {
let txid = Txid::from_hex("c2d4449afa8d26140898dd54d3390b057ba2a5afcf03ba29d7dc0d8b9ffe966e").unwrap();
let secp_ctx = Secp256k1::new();
- let revk_outp_one = dumb_revk_output!(secp_ctx);
- let revk_outp_two = dumb_revk_output!(secp_ctx);
- let revk_outp_three = dumb_revk_output!(secp_ctx);
+ let revk_outp_one = dumb_revk_output!(secp_ctx, false);
+ let revk_outp_two = dumb_revk_output!(secp_ctx, false);
+ let revk_outp_three = dumb_revk_output!(secp_ctx, false);
- let mut package_one = PackageTemplate::build_package(txid, 0, revk_outp_one, 1000, true, 100);
- let package_two = PackageTemplate::build_package(txid, 1, revk_outp_two, 1000, true, 100);
- let package_three = PackageTemplate::build_package(txid, 2, revk_outp_three, 1000, true, 100);
+ let mut package_one = PackageTemplate::build_package(txid, 0, revk_outp_one, 1000, 100);
+ let package_two = PackageTemplate::build_package(txid, 1, revk_outp_two, 1000, 100);
+ let package_three = PackageTemplate::build_package(txid, 2, revk_outp_three, 1000, 100);
package_one.merge_package(package_two);
package_one.merge_package(package_three);
let txid = Txid::from_hex("c2d4449afa8d26140898dd54d3390b057ba2a5afcf03ba29d7dc0d8b9ffe966e").unwrap();
let htlc_outp_one = dumb_htlc_output!();
- let mut package_one = PackageTemplate::build_package(txid, 0, htlc_outp_one, 1000, true, 100);
+ let mut package_one = PackageTemplate::build_package(txid, 0, htlc_outp_one, 1000, 100);
let ret_split = package_one.split_package(&BitcoinOutPoint { txid, vout: 0});
assert!(ret_split.is_none());
}
fn test_package_timer() {
let txid = Txid::from_hex("c2d4449afa8d26140898dd54d3390b057ba2a5afcf03ba29d7dc0d8b9ffe966e").unwrap();
let secp_ctx = Secp256k1::new();
- let revk_outp = dumb_revk_output!(secp_ctx);
+ let revk_outp = dumb_revk_output!(secp_ctx, false);
- let mut package = PackageTemplate::build_package(txid, 0, revk_outp, 1000, true, 100);
+ let mut package = PackageTemplate::build_package(txid, 0, revk_outp, 1000, 100);
assert_eq!(package.timer(), 100);
package.set_timer(101);
assert_eq!(package.timer(), 101);
let secp_ctx = Secp256k1::new();
let counterparty_outp = dumb_counterparty_output!(secp_ctx, 1_000_000, false);
- let package = PackageTemplate::build_package(txid, 0, counterparty_outp, 1000, true, 100);
+ let package = PackageTemplate::build_package(txid, 0, counterparty_outp, 1000, 100);
assert_eq!(package.package_amount(), 1000);
}
let weight_sans_output = (4 + 4 + 1 + 36 + 4 + 1 + 1 + 8 + 1) * WITNESS_SCALE_FACTOR + 2;
{
- let revk_outp = dumb_revk_output!(secp_ctx);
- let package = PackageTemplate::build_package(txid, 0, revk_outp, 0, true, 100);
+ let revk_outp = dumb_revk_output!(secp_ctx, false);
+ let package = PackageTemplate::build_package(txid, 0, revk_outp, 0, 100);
assert_eq!(package.package_weight(&Script::new()), weight_sans_output + WEIGHT_REVOKED_OUTPUT as usize);
}
{
for &opt_anchors in [false, true].iter() {
let counterparty_outp = dumb_counterparty_output!(secp_ctx, 1_000_000, opt_anchors);
- let package = PackageTemplate::build_package(txid, 0, counterparty_outp, 1000, true, 100);
+ let package = PackageTemplate::build_package(txid, 0, counterparty_outp, 1000, 100);
assert_eq!(package.package_weight(&Script::new()), weight_sans_output + weight_received_htlc(opt_anchors) as usize);
}
}
{
for &opt_anchors in [false, true].iter() {
let counterparty_outp = dumb_counterparty_offered_output!(secp_ctx, 1_000_000, opt_anchors);
- let package = PackageTemplate::build_package(txid, 0, counterparty_outp, 1000, true, 100);
+ let package = PackageTemplate::build_package(txid, 0, counterparty_outp, 1000, 100);
assert_eq!(package.package_weight(&Script::new()), weight_sans_output + weight_offered_htlc(opt_anchors) as usize);
}
}
#![cfg_attr(all(not(feature = "std"), not(test)), no_std)]
-#![cfg_attr(all(any(test, feature = "_test_utils"), feature = "_bench_unstable"), feature(test))]
-#[cfg(all(any(test, feature = "_test_utils"), feature = "_bench_unstable"))] extern crate test;
-
#[cfg(not(any(feature = "std", feature = "no-std")))]
compile_error!("at least one of the `std` or `no-std` features must be enabled");
#[cfg(not(feature = "std"))] extern crate core2;
+#[cfg(ldk_bench)] extern crate criterion;
+
#[macro_use]
pub mod util;
pub mod chain;
pub use alloc::string::ToString;
}
-#[cfg(all(not(feature = "_bench_unstable"), feature = "backtrace", feature = "std", test))]
+#[cfg(all(not(ldk_bench), feature = "backtrace", feature = "std", test))]
extern crate backtrace;
mod sync;
//! There are a bunch of these as their handling is relatively error-prone so they are split out
//! here. See also the chanmon_fail_consistency fuzz test.
-use bitcoin::blockdata::block::{Block, BlockHeader};
use bitcoin::blockdata::constants::genesis_block;
use bitcoin::hash_types::BlockHash;
use bitcoin::network::constants::Network;
use crate::io;
use bitcoin::hashes::Hash;
-use bitcoin::TxMerkleNode;
use crate::prelude::*;
use crate::sync::{Arc, Mutex};
assert_eq!(chain_mon.watch_channel(outpoint, new_monitor), ChannelMonitorUpdateStatus::Completed);
chain_mon
};
- let header = BlockHeader {
- version: 0x20000000,
- prev_blockhash: BlockHash::all_zeros(),
- merkle_root: TxMerkleNode::all_zeros(),
- time: 42,
- bits: 42,
- nonce: 42
- };
- chain_mon.chain_monitor.block_connected(&Block { header, txdata: vec![] }, 200);
+ chain_mon.chain_monitor.block_connected(&create_dummy_block(BlockHash::all_zeros(), 42, Vec::new()), 200);
// Set the persister's return value to be a InProgress.
persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
/// * `EXPIRE_PREV_CONFIG_TICKS` = convergence_delay / tick_interval
pub(crate) const EXPIRE_PREV_CONFIG_TICKS: usize = 5;
+/// The number of ticks that may elapse while we're waiting for a response to a
+/// [`msgs::RevokeAndACK`] or [`msgs::ChannelReestablish`] message before we attempt to disconnect
+/// the peer.
+///
+/// See [`Channel::sent_message_awaiting_response`] for more information.
+pub(crate) const DISCONNECT_PEER_AWAITING_RESPONSE_TICKS: usize = 2;
+
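// Hedged sketch of how this timeout is driven: it only fires if
// `ChannelManager::timer_tick_occurred` is called regularly (its docs suggest roughly
// once per minute; `lightning-background-processor` normally does this for you). The
// closure-based helper below is hypothetical, not an LDK API, and sidesteps
// `ChannelManager`'s many generic parameters.
fn spawn_timer_ticker<F: Fn() + Send + 'static>(tick: F) {
	std::thread::spawn(move || loop {
		tick();
		std::thread::sleep(std::time::Duration::from_secs(60));
	});
}
// Usage (assuming a shared `channel_manager: Arc<ChannelManager<...>>`):
//	spawn_timer_ticker(move || channel_manager.timer_tick_occurred());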
struct PendingChannelMonitorUpdate {
update: ChannelMonitorUpdate,
/// In some cases we need to delay letting the [`ChannelMonitorUpdate`] go until after an
/// See-also <https://github.com/lightningnetwork/lnd/issues/4006>
pub workaround_lnd_bug_4006: Option<msgs::ChannelReady>,
+ /// An option set when we wish to track how many ticks have elapsed while waiting for a response
+ /// from our counterparty after sending a message. If the peer has yet to respond after reaching
+ /// `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`, a reconnection should be attempted to try to
+ /// unblock the state machine.
+ ///
+	/// This behavior is mostly motivated by an lnd bug in which we may not receive a message we
+	/// expect in a timely manner, which can lead to channels becoming unusable and/or
+	/// force-closed. An example of such a case can be found at
+	/// <https://github.com/lightningnetwork/lnd/issues/7682>.
+ ///
+ /// This is currently only used when waiting for a [`msgs::ChannelReestablish`] or
+ /// [`msgs::RevokeAndACK`] message from the counterparty.
+ sent_message_awaiting_response: Option<usize>,
+
#[cfg(any(test, fuzzing))]
// When we receive an HTLC fulfill on an outbound path, we may immediately fulfill the
// corresponding HTLC on the inbound path. If, then, the outbound path channel is
next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
workaround_lnd_bug_4006: None,
+ sent_message_awaiting_response: None,
latest_inbound_scid_alias: None,
outbound_scid_alias,
next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
workaround_lnd_bug_4006: None,
+ sent_message_awaiting_response: None,
latest_inbound_scid_alias: None,
outbound_scid_alias,
// OK, we step the channel here and *then* if the new generation fails we can fail the
// channel based on that, but stepping stuff here should be safe either way.
self.channel_state &= !(ChannelState::AwaitingRemoteRevoke as u32);
+ self.sent_message_awaiting_response = None;
self.counterparty_prev_commitment_point = self.counterparty_cur_commitment_point;
self.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);
self.cur_counterparty_commitment_transaction_number -= 1;
}
}
+ self.sent_message_awaiting_response = None;
+
self.channel_state |= ChannelState::PeerDisconnected as u32;
log_trace!(logger, "Peer disconnection resulted in {} remote-announced HTLC drops on channel {}", inbound_drop_count, log_bytes!(self.channel_id()));
}
Some(self.get_last_revoke_and_ack())
} else { None };
let commitment_update = if self.monitor_pending_commitment_signed {
+ self.mark_awaiting_response();
Some(self.get_last_commitment_update(logger))
} else { None };
// Go ahead and unmark PeerDisconnected as various calls we may make check for it (and all
// remaining cases either succeed or ErrorMessage-fail).
self.channel_state &= !(ChannelState::PeerDisconnected as u32);
+ self.sent_message_awaiting_response = None;
let shutdown_msg = if self.channel_state & (ChannelState::LocalShutdownSent as u32) != 0 {
assert!(self.shutdown_scriptpubkey.is_some());
// revoke_and_ack, not on sending commitment_signed, so we add one if have
// AwaitingRemoteRevoke set, which indicates we sent a commitment_signed but haven't gotten
// the corresponding revoke_and_ack back yet.
- let next_counterparty_commitment_number = INITIAL_COMMITMENT_NUMBER - self.cur_counterparty_commitment_transaction_number + if (self.channel_state & ChannelState::AwaitingRemoteRevoke as u32) != 0 { 1 } else { 0 };
+ let is_awaiting_remote_revoke = self.channel_state & ChannelState::AwaitingRemoteRevoke as u32 != 0;
+ if is_awaiting_remote_revoke && !self.is_awaiting_monitor_update() {
+ self.mark_awaiting_response();
+ }
+ let next_counterparty_commitment_number = INITIAL_COMMITMENT_NUMBER - self.cur_counterparty_commitment_transaction_number + if is_awaiting_remote_revoke { 1 } else { 0 };
let channel_ready = if msg.next_local_commitment_number == 1 && INITIAL_COMMITMENT_NUMBER - self.cur_holder_commitment_transaction_number == 1 {
// We should never have to worry about MonitorUpdateInProgress resending ChannelReady
}), None))
}
+	// Marks a channel as waiting for a response from the counterparty. If no response is received
+	// within [`DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`] ticks of sending our own message, we'll
+	// disconnect the peer so that a reconnection can be attempted.
+ fn mark_awaiting_response(&mut self) {
+ self.sent_message_awaiting_response = Some(0);
+ }
+
+ /// Determines whether we should disconnect the counterparty due to not receiving a response
+ /// within our expected timeframe.
+ ///
+ /// This should be called on every [`super::channelmanager::ChannelManager::timer_tick_occurred`].
+ pub fn should_disconnect_peer_awaiting_response(&mut self) -> bool {
+ let ticks_elapsed = if let Some(ticks_elapsed) = self.sent_message_awaiting_response.as_mut() {
+ ticks_elapsed
+ } else {
+ // Don't disconnect when we're not waiting on a response.
+ return false;
+ };
+ *ticks_elapsed += 1;
+ *ticks_elapsed >= DISCONNECT_PEER_AWAITING_RESPONSE_TICKS
+ }
+
pub fn shutdown<SP: Deref>(
&mut self, signer_provider: &SP, their_features: &InitFeatures, msg: &msgs::Shutdown
) -> Result<(Option<msgs::Shutdown>, Option<&ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), ChannelError>
/// May panic if called on a channel that wasn't immediately-previously
/// self.remove_uncommitted_htlcs_and_mark_paused()'d
- pub fn get_channel_reestablish<L: Deref>(&self, logger: &L) -> msgs::ChannelReestablish where L::Target: Logger {
+ pub fn get_channel_reestablish<L: Deref>(&mut self, logger: &L) -> msgs::ChannelReestablish where L::Target: Logger {
assert_eq!(self.channel_state & ChannelState::PeerDisconnected as u32, ChannelState::PeerDisconnected as u32);
assert_ne!(self.cur_counterparty_commitment_transaction_number, INITIAL_COMMITMENT_NUMBER);
// Prior to static_remotekey, my_current_per_commitment_point was critical to claiming
log_info!(logger, "Sending a data_loss_protect with no previous remote per_commitment_secret for channel {}", log_bytes!(self.channel_id()));
[0;32]
};
+ self.mark_awaiting_response();
msgs::ChannelReestablish {
channel_id: self.channel_id(),
// The protocol has two different commitment number concepts - the "commitment
next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
workaround_lnd_bug_4006: None,
+ sent_message_awaiting_response: None,
latest_inbound_scid_alias,
// Later in the ChannelManager deserialization phase we scan for channels and assign scid aliases if its missing
use crate::ln::features::InvoiceFeatures;
use crate::routing::gossip::NetworkGraph;
use crate::routing::router::{BlindedTail, DefaultRouter, InFlightHtlcs, Path, Payee, PaymentParameters, Route, RouteHop, RouteParameters, Router};
-use crate::routing::scoring::ProbabilisticScorer;
+use crate::routing::scoring::{ProbabilisticScorer, ProbabilisticScoringFeeParameters};
use crate::ln::msgs;
use crate::ln::onion_utils;
use crate::ln::onion_utils::HTLCFailReason;
Arc<DefaultRouter<
Arc<NetworkGraph<Arc<L>>>,
Arc<L>,
- Arc<Mutex<ProbabilisticScorer<Arc<NetworkGraph<Arc<L>>>, Arc<L>>>>
+ Arc<Mutex<ProbabilisticScorer<Arc<NetworkGraph<Arc<L>>>, Arc<L>>>>,
+ ProbabilisticScoringFeeParameters,
+ ProbabilisticScorer<Arc<NetworkGraph<Arc<L>>>, Arc<L>>,
>>,
Arc<L>
>;
/// of [`KeysManager`] and [`DefaultRouter`].
///
/// This is not exported to bindings users as Arcs don't make sense in bindings
-pub type SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, 'f, 'g, 'h, M, T, F, L> = ChannelManager<&'a M, &'b T, &'c KeysManager, &'c KeysManager, &'c KeysManager, &'d F, &'e DefaultRouter<&'f NetworkGraph<&'g L>, &'g L, &'h Mutex<ProbabilisticScorer<&'f NetworkGraph<&'g L>, &'g L>>>, &'g L>;
+pub type SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, 'f, 'g, 'h, M, T, F, L> = ChannelManager<&'a M, &'b T, &'c KeysManager, &'c KeysManager, &'c KeysManager, &'d F, &'e DefaultRouter<&'f NetworkGraph<&'g L>, &'g L, &'h Mutex<ProbabilisticScorer<&'f NetworkGraph<&'g L>, &'g L>>, ProbabilisticScoringFeeParameters, ProbabilisticScorer<&'f NetworkGraph<&'g L>, &'g L>>, &'g L>;
macro_rules! define_test_pub_trait { ($vis: vis) => {
/// A trivial trait which describes any [`ChannelManager`] used in testing.
let onion_keys = onion_utils::construct_onion_keys(&self.secp_ctx, &path, &session_priv)
.map_err(|_| APIError::InvalidRoute{err: "Pubkey along hop was maliciously selected".to_owned()})?;
let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(path, total_value, recipient_onion, cur_height, keysend_preimage)?;
- if onion_utils::route_size_insane(&onion_payloads) {
- return Err(APIError::InvalidRoute{err: "Route size too large considering onion data".to_owned()});
- }
- let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, prng_seed, payment_hash);
+
+ let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, prng_seed, payment_hash)
+ .map_err(|_| APIError::InvalidRoute { err: "Route size too large considering onion data".to_owned()})?;
let err: Result<(), _> = loop {
let (counterparty_node_id, id) = match self.short_to_chan_info.read().unwrap().get(&path.hops.first().unwrap().short_channel_id) {
chan.maybe_expire_prev_config();
+ if chan.should_disconnect_peer_awaiting_response() {
+ log_debug!(self.logger, "Disconnecting peer {} due to not making any progress on channel {}",
+ counterparty_node_id, log_bytes!(*chan_id));
+ pending_msg_events.push(MessageSendEvent::HandleError {
+ node_id: counterparty_node_id,
+ action: msgs::ErrorAction::DisconnectPeerWithWarning {
+ msg: msgs::WarningMessage {
+ channel_id: *chan_id,
+ data: "Disconnecting due to timeout awaiting response".to_owned(),
+ },
+ },
+ });
+ }
+
true
});
if peer_state.ok_to_remove(true) {
if let Some(tx) = funding_broadcastable {
log_info!(self.logger, "Broadcasting funding transaction with txid {}", tx.txid());
- self.tx_broadcaster.broadcast_transaction(&tx);
+ self.tx_broadcaster.broadcast_transactions(&[&tx]);
}
{
};
if let Some(broadcast_tx) = tx {
log_info!(self.logger, "Broadcasting {}", log_tx!(broadcast_tx));
- self.tx_broadcaster.broadcast_transaction(&broadcast_tx);
+ self.tx_broadcaster.broadcast_transactions(&[&broadcast_tx]);
}
if let Some(chan) = chan_option {
if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
self.issue_channel_close_events(chan, ClosureReason::CooperativeClosure);
log_info!(self.logger, "Broadcasting {}", log_tx!(tx));
- self.tx_broadcaster.broadcast_transaction(&tx);
+ self.tx_broadcaster.broadcast_transactions(&[&tx]);
update_maps_on_chan_removal!(self, chan);
false
} else { true }
};
let route = find_route(
&nodes[0].node.get_our_node_id(), &route_params, &nodes[0].network_graph,
- None, nodes[0].logger, &scorer, &random_seed_bytes
+ None, nodes[0].logger, &scorer, &(), &random_seed_bytes
).unwrap();
nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage),
RecipientOnionFields::spontaneous_empty(), PaymentId(payment_preimage.0)).unwrap();
let payment_preimage = PaymentPreimage([42; 32]);
let route = find_route(
&nodes[0].node.get_our_node_id(), &route_params, &nodes[0].network_graph,
- None, nodes[0].logger, &scorer, &random_seed_bytes
+ None, nodes[0].logger, &scorer, &(), &random_seed_bytes
).unwrap();
let payment_hash = nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage),
RecipientOnionFields::spontaneous_empty(), PaymentId(payment_preimage.0)).unwrap();
let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
let route = find_route(
&payer_pubkey, &route_params, &network_graph, Some(&first_hops.iter().collect::<Vec<_>>()),
- nodes[0].logger, &scorer, &random_seed_bytes
+ nodes[0].logger, &scorer, &(), &random_seed_bytes
).unwrap();
let test_preimage = PaymentPreimage([42; 32]);
let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
let route = find_route(
&payer_pubkey, &route_params, &network_graph, Some(&first_hops.iter().collect::<Vec<_>>()),
- nodes[0].logger, &scorer, &random_seed_bytes
+ nodes[0].logger, &scorer, &(), &random_seed_bytes
).unwrap();
let test_preimage = PaymentPreimage([42; 32]);
}
}
-#[cfg(all(any(test, feature = "_test_utils"), feature = "_bench_unstable"))]
+#[cfg(ldk_bench)]
pub mod bench {
use crate::chain::Listen;
use crate::chain::chainmonitor::{ChainMonitor, Persist};
use crate::sync::{Arc, Mutex};
- use test::Bencher;
+ use criterion::Criterion;
type Manager<'a, P> = ChannelManager<
&'a ChainMonitor<InMemorySigner, &'a test_utils::TestChainSource,
fn chain_monitor(&self) -> Option<&test_utils::TestChainMonitor> { None }
}
- #[cfg(test)]
- #[bench]
- fn bench_sends(bench: &mut Bencher) {
- bench_two_sends(bench, test_utils::TestPersister::new(), test_utils::TestPersister::new());
+ pub fn bench_sends(bench: &mut Criterion) {
+ bench_two_sends(bench, "bench_sends", test_utils::TestPersister::new(), test_utils::TestPersister::new());
}
- pub fn bench_two_sends<P: Persist<InMemorySigner>>(bench: &mut Bencher, persister_a: P, persister_b: P) {
+ pub fn bench_two_sends<P: Persist<InMemorySigner>>(bench: &mut Criterion, bench_name: &str, persister_a: P, persister_b: P) {
// Do a simple benchmark of sending a payment back and forth between two nodes.
// Note that this is unrealistic as each payment send will require at least two fsync
// calls per node.
assert_eq!(&tx_broadcaster.txn_broadcasted.lock().unwrap()[..], &[tx.clone()]);
- let block = Block {
- header: BlockHeader { version: 0x20000000, prev_blockhash: BestBlock::from_network(network).block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 },
- txdata: vec![tx],
- };
+ let block = create_dummy_block(BestBlock::from_network(network).block_hash(), 42, vec![tx]);
Listen::block_connected(&node_a, &block, 1);
Listen::block_connected(&node_b, &block, 1);
}
}
- bench.iter(|| {
+ bench.bench_function(bench_name, |b| b.iter(|| {
send_payment!(node_a, node_b);
send_payment!(node_b, node_a);
- });
+ }));
}
}
mark: PhantomData<T>,
}
-impl <T: sealed::Context> Features<T> {
- pub(crate) fn or(mut self, o: Self) -> Self {
+impl<T: sealed::Context> core::ops::BitOr for Features<T> {
+ type Output = Self;
+
+ fn bitor(mut self, o: Self) -> Self {
let total_feature_len = cmp::max(self.flags.len(), o.flags.len());
self.flags.resize(total_feature_len, 0u8);
for (byte, o_byte) in self.flags.iter_mut().zip(o.flags.iter()) {
self.flags.eq(&o.flags)
}
}
+impl<T: sealed::Context> PartialOrd for Features<T> {
+ fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
+ self.flags.partial_cmp(&other.flags)
+ }
+}
+impl<T: sealed::Context + Eq> Ord for Features<T> {
+ fn cmp(&self, other: &Self) -> cmp::Ordering {
+ self.flags.cmp(&other.flags)
+ }
+}
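A minimal usage sketch of the new operator and ordering impls (illustrative only, not part of the patch; `set_data_loss_protect_optional` is an existing setter reused here):

    let a = InitFeatures::empty();
    let mut b = InitFeatures::empty();
    b.set_data_loss_protect_optional();
    // `|` replaces the old crate-internal `or` and unions the two flag vectors.
    let combined = a | b;
    // `PartialOrd`/`Ord` defer to the underlying flag bytes, so `Features` can
    // now serve as a sorted-collection key.
    assert!(combined >= InitFeatures::empty());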
impl<T: sealed::Context> fmt::Debug for Features<T> {
fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
self.flags.fmt(fmt)
self.flags.iter().any(|&byte| (byte & 0b10_10_10_10) != 0)
}
+ /// Returns true if this `Features` object contains required features unknown by `other`.
+ pub fn requires_unknown_bits_from(&self, other: &Features<T>) -> bool {
+ // Bitwise AND-ing with all even bits set except for known features will select required
+ // unknown features.
+ self.flags.iter().enumerate().any(|(i, &byte)| {
+ const REQUIRED_FEATURES: u8 = 0b01_01_01_01;
+ const OPTIONAL_FEATURES: u8 = 0b10_10_10_10;
+ let unknown_features = if i < other.flags.len() {
+ // Form a mask similar to !T::KNOWN_FEATURE_MASK only for `other`
+ !(other.flags[i]
+ | ((other.flags[i] >> 1) & REQUIRED_FEATURES)
+ | ((other.flags[i] << 1) & OPTIONAL_FEATURES))
+ } else {
+ 0b11_11_11_11
+ };
+ (byte & (REQUIRED_FEATURES & unknown_features)) != 0
+ })
+ }
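A short worked example of the masking above, with illustrative byte values:

    // Suppose `self.flags[0] = 0b0001_0000` (bit 4 required) while
    // `other.flags[0] = 0b0000_0010` (only the bit-0/1 pair known, as optional):
    //   known  = other | ((other >> 1) & REQUIRED_FEATURES)
    //                  | ((other << 1) & OPTIONAL_FEATURES) = 0b0000_0011
    //   unknown_features = !known                           = 0b1111_1100
    //   byte & (REQUIRED_FEATURES & unknown_features)
    //        = 0b0001_0000 & 0b0101_0100                    = 0b0001_0000 != 0
    // so `self` requires a feature that `other` does not know about.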
+
/// Returns true if this `Features` object contains unknown feature flags which are set as
/// "required".
pub fn requires_unknown_bits(&self) -> bool {
}
true
}
+
+ /// Sets a required custom feature bit. Errors if `bit` is outside the custom range as defined
+ /// by [bLIP 2] or if it is a known `T` feature.
+ ///
+ /// Note: Required bits are even. If an odd bit is given, then the corresponding even bit will
+ /// be set instead (i.e., `bit - 1`).
+ ///
+ /// [bLIP 2]: https://github.com/lightning/blips/blob/master/blip-0002.md#feature-bits
+ pub fn set_required_custom_bit(&mut self, bit: usize) -> Result<(), ()> {
+ self.set_custom_bit(bit - (bit % 2))
+ }
+
+ /// Sets an optional custom feature bit. Errors if `bit` is outside the custom range as defined
+ /// by [bLIP 2] or if it is a known `T` feature.
+ ///
+ /// Note: Optional bits are odd. If an even bit is given, then the corresponding odd bit will be
+ /// set instead (i.e., `bit + 1`).
+ ///
+ /// [bLIP 2]: https://github.com/lightning/blips/blob/master/blip-0002.md#feature-bits
+ pub fn set_optional_custom_bit(&mut self, bit: usize) -> Result<(), ()> {
+ self.set_custom_bit(bit + (1 - (bit % 2)))
+ }
+
+ fn set_custom_bit(&mut self, bit: usize) -> Result<(), ()> {
+ if bit < 256 {
+ return Err(());
+ }
+
+ let byte_offset = bit / 8;
+ let mask = 1 << (bit - 8 * byte_offset);
+ if byte_offset < T::KNOWN_FEATURE_MASK.len() {
+ if (T::KNOWN_FEATURE_MASK[byte_offset] & mask) != 0 {
+ return Err(());
+ }
+ }
+
+ if self.flags.len() <= byte_offset {
+ self.flags.resize(byte_offset + 1, 0u8);
+ }
+
+ self.flags[byte_offset] |= mask;
+
+ Ok(())
+ }
}
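A quick arithmetic sketch of the custom-bit setters (values chosen from the bLIP 2 custom range, which starts at bit 256; `flags` is the crate-internal byte vector):

    let mut features = InvoiceFeatures::empty();
    // Odd input 259 maps down to the even (required) bit 258:
    //   byte_offset = 258 / 8 = 32, mask = 1 << (258 - 8 * 32) = 0b0000_0100
    assert!(features.set_required_custom_bit(259).is_ok());
    // Even input 260 maps up to the odd (optional) bit 261:
    //   byte_offset = 32, mask = 1 << (261 - 256) = 0b0010_0000
    assert!(features.set_optional_custom_bit(260).is_ok());
    assert_eq!(features.flags[32], 0b0010_0100);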
impl<T: sealed::UpfrontShutdownScript> Features<T> {
assert!(features.supports_unknown_bits());
}
+ #[test]
+ fn requires_unknown_bits_from() {
+ let mut features1 = InitFeatures::empty();
+ let mut features2 = InitFeatures::empty();
+ assert!(!features1.requires_unknown_bits_from(&features2));
+ assert!(!features2.requires_unknown_bits_from(&features1));
+
+ features1.set_data_loss_protect_required();
+ assert!(features1.requires_unknown_bits_from(&features2));
+ assert!(!features2.requires_unknown_bits_from(&features1));
+
+ features2.set_data_loss_protect_optional();
+ assert!(!features1.requires_unknown_bits_from(&features2));
+ assert!(!features2.requires_unknown_bits_from(&features1));
+
+ features2.set_gossip_queries_required();
+ assert!(!features1.requires_unknown_bits_from(&features2));
+ assert!(features2.requires_unknown_bits_from(&features1));
+
+ features1.set_gossip_queries_optional();
+ assert!(!features1.requires_unknown_bits_from(&features2));
+ assert!(!features2.requires_unknown_bits_from(&features1));
+
+ features1.set_variable_length_onion_required();
+ assert!(features1.requires_unknown_bits_from(&features2));
+ assert!(!features2.requires_unknown_bits_from(&features1));
+
+ features2.set_variable_length_onion_optional();
+ assert!(!features1.requires_unknown_bits_from(&features2));
+ assert!(!features2.requires_unknown_bits_from(&features1));
+
+ features1.set_basic_mpp_required();
+ features2.set_wumbo_required();
+ assert!(features1.requires_unknown_bits_from(&features2));
+ assert!(features2.requires_unknown_bits_from(&features1));
+ }
+
#[test]
fn convert_to_context_with_relevant_flags() {
let mut init_features = InitFeatures::empty();
init_features.set_payment_secret_required();
init_features.set_basic_mpp_optional();
init_features.set_wumbo_optional();
+ init_features.set_anchors_zero_fee_htlc_tx_optional();
init_features.set_shutdown_any_segwit_optional();
init_features.set_onion_messages_optional();
init_features.set_channel_type_optional();
init_features.set_scid_privacy_optional();
init_features.set_zero_conf_optional();
- init_features.set_anchors_zero_fee_htlc_tx_optional();
assert!(init_features.initial_routing_sync());
assert!(!init_features.supports_upfront_shutdown_script());
// Check that the flags are as expected:
// - option_data_loss_protect (req)
// - var_onion_optin (req) | static_remote_key (req) | payment_secret(req)
- // - basic_mpp | wumbo
+ // - basic_mpp | wumbo | anchors_zero_fee_htlc_tx
// - opt_shutdown_anysegwit
// - onion_messages
// - option_channel_type | option_scid_alias
assert!(features.supports_payment_secret());
}
+ #[test]
+ fn set_custom_bits() {
+ let mut features = InvoiceFeatures::empty();
+ features.set_variable_length_onion_optional();
+ assert_eq!(features.flags[1], 0b00000010);
+
+ assert!(features.set_optional_custom_bit(255).is_err());
+ assert!(features.set_required_custom_bit(256).is_ok());
+ assert!(features.set_required_custom_bit(258).is_ok());
+ assert_eq!(features.flags[31], 0b00000000);
+ assert_eq!(features.flags[32], 0b00000101);
+
+ let known_bit = <sealed::InvoiceContext as sealed::PaymentSecret>::EVEN_BIT;
+ let byte_offset = <sealed::InvoiceContext as sealed::PaymentSecret>::BYTE_OFFSET;
+ assert_eq!(byte_offset, 1);
+ assert_eq!(features.flags[byte_offset], 0b00000010);
+ assert!(features.set_required_custom_bit(known_bit).is_err());
+ assert_eq!(features.flags[byte_offset], 0b00000010);
+
+ let mut features = InvoiceFeatures::empty();
+ assert!(features.set_optional_custom_bit(256).is_ok());
+ assert!(features.set_optional_custom_bit(259).is_ok());
+ assert_eq!(features.flags[32], 0b00001010);
+
+ let mut features = InvoiceFeatures::empty();
+ assert!(features.set_required_custom_bit(257).is_ok());
+ assert!(features.set_required_custom_bit(258).is_ok());
+ assert_eq!(features.flags[32], 0b00000101);
+ }
+
#[test]
fn encodes_features_without_length() {
let features = OfferFeatures::from_le_bytes(vec![1, 2, 3, 4, 5, 42, 100, 101]);
if conf_height > first_connect_height {
connect_blocks(node, conf_height - first_connect_height);
}
- let mut block = Block {
- header: BlockHeader { version: 0x20000000, prev_blockhash: node.best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: conf_height, bits: 42, nonce: 42 },
- txdata: Vec::new(),
- };
+ let mut txdata = Vec::new();
for _ in 0..*node.network_chan_count.borrow() { // Make sure we don't end up with channels at the same short id by offsetting by chan_count
- block.txdata.push(Transaction { version: 0, lock_time: PackedLockTime::ZERO, input: Vec::new(), output: Vec::new() });
+ txdata.push(Transaction { version: 0, lock_time: PackedLockTime::ZERO, input: Vec::new(), output: Vec::new() });
}
for tx in txn {
- block.txdata.push((*tx).clone());
+ txdata.push((*tx).clone());
}
+ let block = create_dummy_block(node.best_block_hash(), conf_height, txdata);
connect_block(node, &block);
scid_utils::scid_from_parts(conf_height as u64, block.txdata.len() as u64 - 1, 0).unwrap()
}
}
}
+pub fn create_dummy_header(prev_blockhash: BlockHash, time: u32) -> BlockHeader {
+ BlockHeader {
+ version: 0x2000_0000,
+ prev_blockhash,
+ merkle_root: TxMerkleNode::all_zeros(),
+ time,
+ bits: 42,
+ nonce: 42,
+ }
+}
+
+pub fn create_dummy_block(prev_blockhash: BlockHash, time: u32, txdata: Vec<Transaction>) -> Block {
+ Block { header: create_dummy_header(prev_blockhash, time), txdata }
+}
+
pub fn connect_blocks<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, depth: u32) -> BlockHash {
let skip_intermediaries = node.connect_style.borrow().skips_blocks();
let height = node.best_block_info().1 + 1;
- let mut block = Block {
- header: BlockHeader { version: 0x2000000, prev_blockhash: node.best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: height, bits: 42, nonce: 42 },
- txdata: vec![],
- };
+ let mut block = create_dummy_block(node.best_block_hash(), height, Vec::new());
assert!(depth >= 1);
for i in 1..depth {
let prev_blockhash = block.header.block_hash();
do_connect_block(node, block, skip_intermediaries);
- block = Block {
- header: BlockHeader { version: 0x20000000, prev_blockhash, merkle_root: TxMerkleNode::all_zeros(), time: height + i, bits: 42, nonce: 42 },
- txdata: vec![],
- };
+ block = create_dummy_block(prev_blockhash, height + i, Vec::new());
}
let hash = block.header.block_hash();
do_connect_block(node, block, false);
router::get_route(
&send_node.node.get_our_node_id(), payment_params, &send_node.network_graph.read_only(),
Some(&send_node.node.list_usable_channels().iter().collect::<Vec<_>>()),
- recv_value, send_node.logger, &scorer, &random_seed_bytes
+ recv_value, send_node.logger, &scorer, &(), &random_seed_bytes
)
}
}
#[macro_export]
-#[cfg(any(test, feature = "_bench_unstable", feature = "_test_utils"))]
+#[cfg(any(test, ldk_bench, feature = "_test_utils"))]
macro_rules! expect_payment_claimable {
($node: expr, $expected_payment_hash: expr, $expected_payment_secret: expr, $expected_recv_value: expr) => {
expect_payment_claimable!($node, $expected_payment_hash, $expected_payment_secret, $expected_recv_value, None, $node.node.get_our_node_id())
}
#[macro_export]
-#[cfg(any(test, feature = "_bench_unstable", feature = "_test_utils"))]
+#[cfg(any(test, ldk_bench, feature = "_test_utils"))]
macro_rules! expect_payment_claimed {
($node: expr, $expected_payment_hash: expr, $expected_recv_value: expr) => {
let events = $node.node.get_and_clear_pending_events();
}
}
-#[cfg(any(test, feature = "_bench_unstable", feature = "_test_utils"))]
+#[cfg(any(test, ldk_bench, feature = "_test_utils"))]
pub fn expect_channel_pending_event<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, expected_counterparty_node_id: &PublicKey) {
let events = node.node.get_and_clear_pending_events();
assert_eq!(events.len(), 1);
}
}
-#[cfg(any(test, feature = "_bench_unstable", feature = "_test_utils"))]
+#[cfg(any(test, ldk_bench, feature = "_test_utils"))]
pub fn expect_channel_ready_event<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, expected_counterparty_node_id: &PublicKey) {
let events = node.node.get_and_clear_pending_events();
assert_eq!(events.len(), 1);
let random_seed_bytes = keys_manager.get_secure_random_bytes();
let route = router::get_route(
&origin_node.node.get_our_node_id(), &payment_params, &network_graph,
- None, recv_value, origin_node.logger, &scorer, &random_seed_bytes).unwrap();
+ None, recv_value, origin_node.logger, &scorer, &(), &random_seed_bytes).unwrap();
assert_eq!(route.paths.len(), 1);
assert_eq!(route.paths[0].hops.len(), expected_route.len());
for (node, hop) in expected_route.iter().zip(route.paths[0].hops.iter()) {
use crate::ln::{PaymentPreimage, PaymentSecret, PaymentHash};
use crate::ln::channel::{commitment_tx_base_weight, COMMITMENT_TX_WEIGHT_PER_HTLC, CONCURRENT_INBOUND_HTLC_FEE_BUFFER, FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE, MIN_AFFORDABLE_HTLC_COUNT};
use crate::ln::channelmanager::{self, PaymentId, RAACommitmentOrder, PaymentSendFailure, RecipientOnionFields, BREAKDOWN_TIMEOUT, ENABLE_GOSSIP_TICKS, DISABLE_GOSSIP_TICKS, MIN_CLTV_EXPIRY_DELTA};
-use crate::ln::channel::{Channel, ChannelError};
+use crate::ln::channel::{DISCONNECT_PEER_AWAITING_RESPONSE_TICKS, Channel, ChannelError};
use crate::ln::{chan_utils, onion_utils};
use crate::ln::chan_utils::{OFFERED_HTLC_SCRIPT_WEIGHT, htlc_success_tx_weight, htlc_timeout_tx_weight, HTLCOutputInCommitment};
use crate::routing::gossip::{NetworkGraph, NetworkUpdate};
use crate::util::config::UserConfig;
use bitcoin::hash_types::BlockHash;
-use bitcoin::blockdata::block::{Block, BlockHeader};
use bitcoin::blockdata::script::{Builder, Script};
use bitcoin::blockdata::opcodes;
use bitcoin::blockdata::constants::genesis_block;
use bitcoin::network::constants::Network;
-use bitcoin::{PackedLockTime, Sequence, Transaction, TxIn, TxMerkleNode, TxOut, Witness};
+use bitcoin::{PackedLockTime, Sequence, Transaction, TxIn, TxOut, Witness};
use bitcoin::OutPoint as BitcoinOutPoint;
use bitcoin::secp256k1::Secp256k1;
let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
if steps & 0b1000_0000 != 0 {
- let block = Block {
- header: BlockHeader { version: 0x20000000, prev_blockhash: nodes[0].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 },
- txdata: vec![],
- };
+ let block = create_dummy_block(nodes[0].best_block_hash(), 42, Vec::new());
connect_block(&nodes[0], &block);
connect_block(&nodes[1], &block);
}
let onion_keys = onion_utils::construct_onion_keys(&secp_ctx, &route.paths[0], &session_priv).unwrap();
let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(&route.paths[0],
3460001, RecipientOnionFields::secret_only(payment_secret), cur_height, &None).unwrap();
- let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &payment_hash);
+ let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &payment_hash).unwrap();
let msg = msgs::UpdateAddHTLC {
channel_id: chan.2,
htlc_id: 0,
let onion_keys = onion_utils::construct_onion_keys(&secp_ctx, &route.paths[0], &session_priv).unwrap();
let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(&route.paths[0],
700_000, RecipientOnionFields::secret_only(payment_secret), cur_height, &None).unwrap();
- let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &payment_hash);
+ let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &payment_hash).unwrap();
let msg = msgs::UpdateAddHTLC {
channel_id: chan.2,
htlc_id: MIN_AFFORDABLE_HTLC_COUNT as u64,
let onion_keys = onion_utils::construct_onion_keys(&secp_ctx, &route_2.paths[0], &session_priv).unwrap();
let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(
&route_2.paths[0], recv_value_2, RecipientOnionFields::spontaneous_empty(), cur_height, &None).unwrap();
- let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &our_payment_hash_1);
+ let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &our_payment_hash_1).unwrap();
let msg = msgs::UpdateAddHTLC {
channel_id: chan.2,
htlc_id: 1,
assert_eq!(node_txn[1].lock_time.0, 0);
// Verify that B's ChannelManager is able to extract preimage from HTLC Success tx and pass it backward
- let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42};
- connect_block(&nodes[1], &Block { header, txdata: vec![commitment_tx[0].clone(), node_txn[0].clone(), node_txn[1].clone()]});
+ connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, vec![commitment_tx[0].clone(), node_txn[0].clone(), node_txn[1].clone()]));
connect_blocks(&nodes[1], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires
{
let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
// we already checked the same situation with A.
// Verify that A's ChannelManager is able to extract preimage from preimage tx and generate PaymentSent
- let mut header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[0].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42};
- connect_block(&nodes[0], &Block { header, txdata: vec![node_a_commitment_tx[0].clone(), commitment_spend.clone()] });
+ connect_block(&nodes[0], &create_dummy_block(nodes[0].best_block_hash(), 42, vec![node_a_commitment_tx[0].clone(), commitment_spend.clone()]));
connect_blocks(&nodes[0], TEST_FINAL_CLTV + MIN_CLTV_EXPIRY_DELTA as u32); // Confirm blocks until the HTLC expires
check_closed_broadcast!(nodes[0], true);
check_added_monitors!(nodes[0], 1);
let (onion_payloads, _amount_msat, cltv_expiry) = onion_utils::build_onion_payloads(
&route.paths[0], 50_000, RecipientOnionFields::secret_only(payment_secret), current_height, &None).unwrap();
let onion_keys = onion_utils::construct_onion_keys(&secp_ctx, &route.paths[0], &session_priv).unwrap();
- let onion_routing_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &payment_hash);
+ let onion_routing_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &payment_hash).unwrap();
// Send a 0-msat update_add_htlc to fail the channel.
let update_add_htlc = msgs::UpdateAddHTLC {
assert_eq!(node_txn.len(), 3);
assert_eq!(node_txn[0].txid(), node_txn[1].txid());
- let mut header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
- connect_block(&nodes[1], &Block { header, txdata: vec![node_txn[0].clone(), node_txn[1].clone()]});
+ let block = create_dummy_block(nodes[1].best_block_hash(), 42, vec![node_txn[0].clone(), node_txn[1].clone()]);
+ connect_block(&nodes[1], &block);
check_closed_broadcast!(nodes[1], true);
check_added_monitors!(nodes[1], 1);
check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
// Duplicate the connect_block call since this may happen due to other listeners
// registering new transactions
- connect_block(&nodes[1], &Block { header, txdata: vec![node_txn[0].clone(), node_txn[2].clone()]});
+ connect_block(&nodes[1], &block);
}
#[test]
route_payment(&nodes[0], &[&nodes[1]], 100000).1
};
- let mut block = Block {
- header: BlockHeader { version: 0x20000000, prev_blockhash: nodes[0].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 },
- txdata: vec![],
- };
+ let mut block = create_dummy_block(nodes[0].best_block_hash(), 42, Vec::new());
connect_block(&nodes[0], &block);
connect_block(&nodes[1], &block);
let block_count = TEST_FINAL_CLTV + CHAN_CONFIRM_DEPTH + 2 - CLTV_CLAIM_BUFFER - LATENCY_GRACE_PERIOD_BLOCKS;
match event {
Event::SpendableOutputs { mut outputs } => {
for outp in outputs.drain(..) {
- txn.push($keysinterface.backing.spend_spendable_outputs(&[&outp], Vec::new(), Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script(), 253, &secp_ctx).unwrap());
+ txn.push($keysinterface.backing.spend_spendable_outputs(&[&outp], Vec::new(), Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script(), 253, None, &secp_ctx).unwrap());
all_outputs.push(outp);
}
},
};
}
if all_outputs.len() > 1 {
- if let Ok(tx) = $keysinterface.backing.spend_spendable_outputs(&all_outputs.iter().map(|a| a).collect::<Vec<_>>(), Vec::new(), Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script(), 253, &secp_ctx) {
+ if let Ok(tx) = $keysinterface.backing.spend_spendable_outputs(&all_outputs.iter().map(|a| a).collect::<Vec<_>>(), Vec::new(), Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script(), 253, None, &secp_ctx) {
txn.push(tx);
}
}
assert_ne!(revoked_htlc_txn[0].lock_time.0, 0); // HTLC-Timeout
// B will generate justice tx from A's revoked commitment/HTLC tx
- let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
- connect_block(&nodes[1], &Block { header, txdata: vec![revoked_local_txn[0].clone(), revoked_htlc_txn[0].clone()] });
+ connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, vec![revoked_local_txn[0].clone(), revoked_htlc_txn[0].clone()]));
check_closed_broadcast!(nodes[1], true);
check_added_monitors!(nodes[1], 1);
check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
assert_eq!(revoked_local_txn[0].output[unspent_local_txn_output].script_pubkey.len(), 2 + 20); // P2WPKH
// A will generate justice tx from B's revoked commitment/HTLC tx
- let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[0].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
- connect_block(&nodes[0], &Block { header, txdata: vec![revoked_local_txn[0].clone(), revoked_htlc_txn[0].clone()] });
+ connect_block(&nodes[0], &create_dummy_block(nodes[0].best_block_hash(), 42, vec![revoked_local_txn[0].clone(), revoked_htlc_txn[0].clone()]));
check_closed_broadcast!(nodes[0], true);
check_added_monitors!(nodes[0], 1);
check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
assert_eq!(c_txn[0].lock_time.0, 0); // Success tx
// So we broadcast C's commitment tx and HTLC-Success on B's chain; we should successfully be able to extract the preimage and update the downstream monitor
- let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42};
- connect_block(&nodes[1], &Block { header, txdata: vec![commitment_tx[0].clone(), c_txn[0].clone()]});
+ connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, vec![commitment_tx[0].clone(), c_txn[0].clone()]));
check_added_monitors!(nodes[1], 1);
let events = nodes[1].node.get_and_clear_pending_events();
assert_eq!(events.len(), 2);
check_added_monitors!(nodes[1], 1);
let starting_block = nodes[1].best_block_info();
- let mut block = Block {
- header: BlockHeader { version: 0x20000000, prev_blockhash: starting_block.0, merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 },
- txdata: vec![],
- };
+ let mut block = create_dummy_block(starting_block.0, 42, Vec::new());
for _ in starting_block.1 + 1..TEST_FINAL_CLTV - CLTV_CLAIM_BUFFER + starting_block.1 + 2 {
connect_block(&nodes[1], &block);
block.header.prev_blockhash = block.block_hash();
// to "time out" the HTLC.
let starting_block = nodes[1].best_block_info();
- let mut header = BlockHeader { version: 0x20000000, prev_blockhash: starting_block.0, merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
+ let mut block = create_dummy_block(starting_block.0, 42, Vec::new());
for _ in starting_block.1 + 1..TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + starting_block.1 + 2 {
- connect_block(&nodes[0], &Block { header, txdata: Vec::new()});
- header.prev_blockhash = header.block_hash();
+ connect_block(&nodes[0], &block);
+ block.header.prev_blockhash = block.block_hash();
}
test_txn_broadcast(&nodes[0], &chan, None, HTLCType::NONE);
check_closed_broadcast!(nodes[0], true);
}
let starting_block = nodes[1].best_block_info();
- let mut block = Block {
- header: BlockHeader { version: 0x20000000, prev_blockhash: starting_block.0, merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 },
- txdata: vec![],
- };
+ let mut block = create_dummy_block(starting_block.0, 42, Vec::new());
for _ in starting_block.1 + 1..TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + CHAN_CONFIRM_DEPTH + 2 {
connect_block(&nodes[0], &block);
block.header.prev_blockhash = block.block_hash();
let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::signing_only(), &route.paths[0], &session_priv).unwrap();
let (onion_payloads, _htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(
&route.paths[0], 3999999, RecipientOnionFields::secret_only(our_payment_secret), cur_height, &None).unwrap();
- let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &our_payment_hash);
+ let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &our_payment_hash).unwrap();
let mut msg = msgs::UpdateAddHTLC {
channel_id: chan.2,
let scorer = test_utils::TestScorer::new();
let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV).with_bolt11_features(nodes[1].node.invoice_features()).unwrap();
- let route = get_route(&nodes[0].node.get_our_node_id(), &payment_params, &nodes[0].network_graph.read_only(), None, 10_000, nodes[0].logger, &scorer, &random_seed_bytes).unwrap();
+ let route = get_route(&nodes[0].node.get_our_node_id(), &payment_params, &nodes[0].network_graph.read_only(), None, 10_000, nodes[0].logger, &scorer, &(), &random_seed_bytes).unwrap();
let (_, our_payment_hash, _) = get_payment_preimage_hash!(nodes[0]);
let our_payment_secret = nodes[1].node.create_inbound_payment_for_hash(our_payment_hash, Some(100_000), 7200, None).unwrap();
nodes[0].node.send_payment_with_route(&route, our_payment_hash,
// Actually revoke tx by claiming a HTLC
claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage);
- let header = BlockHeader { version: 0x20000000, prev_blockhash: header_114, merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
- connect_block(&nodes[1], &Block { header, txdata: vec![revoked_txn[0].clone()] });
+ connect_block(&nodes[1], &create_dummy_block(header_114, 42, vec![revoked_txn[0].clone()]));
check_added_monitors!(nodes[1], 1);
// One or more justice txn should have been broadcast; check them
let scorer = test_utils::TestScorer::new();
let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
let route = get_route(&nodes[0].node.get_our_node_id(), &payment_params, &nodes[0].network_graph.read_only(), None,
- 3_000_000, nodes[0].logger, &scorer, &random_seed_bytes).unwrap();
+ 3_000_000, nodes[0].logger, &scorer, &(), &random_seed_bytes).unwrap();
let payment_preimage = send_along_route(&nodes[0], route, &[&nodes[1]], 3_000_000).0;
let payment_params = PaymentParameters::from_node_id(nodes[0].node.get_our_node_id(), 50).with_bolt11_features(nodes[0].node.invoice_features()).unwrap();
let route = get_route(&nodes[1].node.get_our_node_id(), &payment_params, &nodes[1].network_graph.read_only(), None,
- 3_000_000, nodes[0].logger, &scorer, &random_seed_bytes).unwrap();
+ 3_000_000, nodes[0].logger, &scorer, &(), &random_seed_bytes).unwrap();
send_along_route(&nodes[1], route, &[&nodes[0]], 3_000_000);
let revoked_local_txn = get_local_commitment_txn!(nodes[1], chan.2);
// Revoke local commitment tx
claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage);
- let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
// B will generate both revoked HTLC-timeout/HTLC-preimage txn from revoked commitment tx
- connect_block(&nodes[1], &Block { header, txdata: vec![revoked_local_txn[0].clone()] });
+ connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, vec![revoked_local_txn[0].clone()]));
check_closed_broadcast!(nodes[1], true);
check_added_monitors!(nodes[1], 1);
check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
// Broadcast set of revoked txn on A
let hash_128 = connect_blocks(&nodes[0], 40);
- let header_11 = BlockHeader { version: 0x20000000, prev_blockhash: hash_128, merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
- connect_block(&nodes[0], &Block { header: header_11, txdata: vec![revoked_local_txn[0].clone()] });
- let header_129 = BlockHeader { version: 0x20000000, prev_blockhash: header_11.block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
- connect_block(&nodes[0], &Block { header: header_129, txdata: vec![revoked_htlc_txn[0].clone(), revoked_htlc_txn[1].clone()] });
+ let block_11 = create_dummy_block(hash_128, 42, vec![revoked_local_txn[0].clone()]);
+ connect_block(&nodes[0], &block_11);
+ let block_129 = create_dummy_block(block_11.block_hash(), 42, vec![revoked_htlc_txn[0].clone(), revoked_htlc_txn[1].clone()]);
+ connect_block(&nodes[0], &block_129);
let events = nodes[0].node.get_and_clear_pending_events();
expect_pending_htlcs_forwardable_from_events!(nodes[0], events[0..1], true);
match events.last().unwrap() {
}
// Connect one more block to see if bumped penalty txn are issued for the HTLC txn
- let header_130 = BlockHeader { version: 0x20000000, prev_blockhash: header_129.block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
- connect_block(&nodes[0], &Block { header: header_130, txdata: penalty_txn });
- let header_131 = BlockHeader { version: 0x20000000, prev_blockhash: header_130.block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
- connect_block(&nodes[0], &Block { header: header_131, txdata: Vec::new() });
+ let block_130 = create_dummy_block(block_129.block_hash(), 42, penalty_txn);
+ connect_block(&nodes[0], &block_130);
+ let block_131 = create_dummy_block(block_130.block_hash(), 42, Vec::new());
+ connect_block(&nodes[0], &block_131);
// A few more blocks to confirm the penalty txn
connect_blocks(&nodes[0], 4);
txn
};
// Broadcast claim txn and confirm blocks to avoid further bumps on these outputs
- let header_145 = BlockHeader { version: 0x20000000, prev_blockhash: header_144, merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
- connect_block(&nodes[0], &Block { header: header_145, txdata: node_txn });
+ connect_block(&nodes[0], &create_dummy_block(header_144, 42, node_txn));
connect_blocks(&nodes[0], 20);
{
let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
node_txn.clear();
penalty_txn
};
- let header_130 = BlockHeader { version: 0x20000000, prev_blockhash: nodes[0].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
- connect_block(&nodes[0], &Block { header: header_130, txdata: penalty_txn });
+ connect_block(&nodes[0], &create_dummy_block(nodes[0].best_block_hash(), 42, penalty_txn));
connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
{
let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(OutPoint { txid: chan.3.txid(), index: 0 }).unwrap();
// Edit amt_to_forward to simulate the sender having set
// the final amount and the routing node taking less fee
onion_payloads[1].amt_to_forward = 99_000;
- let new_onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &our_payment_hash);
+ let new_onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &our_payment_hash).unwrap();
payment_event.msgs[0].onion_routing_packet = new_onion_packet;
}
} else { panic!(); }
let mut block = {
let node_1_blocks = nodes[1].blocks.lock().unwrap();
- Block {
- header: BlockHeader {
- version: 0x2000000,
- prev_blockhash: node_1_blocks.last().unwrap().0.block_hash(),
- merkle_root: TxMerkleNode::all_zeros(),
- time: node_1_blocks.len() as u32 + 7200, bits: 42, nonce: 42 },
- txdata: vec![],
- }
+ create_dummy_block(node_1_blocks.last().unwrap().0.block_hash(), node_1_blocks.len() as u32 + 7200, Vec::new())
};
connect_block(&nodes[1], &block);
if let Err(APIError::APIMisuseError { err }) = nodes[1].node.create_inbound_payment_for_hash_legacy(payment_hash, Some(100_000), 2) {
assert_eq!(watchtower.watch_channel(outpoint, new_monitor), ChannelMonitorUpdateStatus::Completed);
watchtower
};
- let header = BlockHeader { version: 0x20000000, prev_blockhash: BlockHash::all_zeros(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
- let block = Block { header, txdata: vec![] };
+ let block = create_dummy_block(BlockHash::all_zeros(), 42, Vec::new());
// Make the tx_broadcaster aware of enough blocks that it doesn't think we're violating
// transaction lock time requirements here.
chanmon_cfgs[0].tx_broadcaster.blocks.lock().unwrap().resize(200, (block.clone(), 200));
assert_eq!(watchtower.watch_channel(outpoint, new_monitor), ChannelMonitorUpdateStatus::Completed);
watchtower
};
- let header = BlockHeader { version: 0x20000000, prev_blockhash: BlockHash::all_zeros(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
- let block = Block { header, txdata: vec![] };
+ let block = create_dummy_block(BlockHash::all_zeros(), 42, Vec::new());
// Make Alice aware of enough blocks that it doesn't think we're violating transaction lock time
// requirements here.
const HTLC_TIMEOUT_BROADCAST: u32 = CHAN_CONFIRM_DEPTH + 1 + TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS;
assert_eq!(watchtower.watch_channel(outpoint, new_monitor), ChannelMonitorUpdateStatus::Completed);
watchtower
};
- let header = BlockHeader { version: 0x20000000, prev_blockhash: BlockHash::all_zeros(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
- watchtower_bob.chain_monitor.block_connected(&Block { header, txdata: vec![] }, HTLC_TIMEOUT_BROADCAST - 1);
+ watchtower_bob.chain_monitor.block_connected(&create_dummy_block(BlockHash::all_zeros(), 42, Vec::new()), HTLC_TIMEOUT_BROADCAST - 1);
// Route another payment to generate another update with the previous HTLC still pending
let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 3000000);
check_added_monitors!(nodes[0], 1);
// Provide one more block to watchtower Bob, expect broadcast of commitment and HTLC-Timeout
- let header = BlockHeader { version: 0x20000000, prev_blockhash: BlockHash::all_zeros(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
- watchtower_bob.chain_monitor.block_connected(&Block { header, txdata: vec![] }, HTLC_TIMEOUT_BROADCAST);
+ watchtower_bob.chain_monitor.block_connected(&create_dummy_block(BlockHash::all_zeros(), 42, Vec::new()), HTLC_TIMEOUT_BROADCAST);
// Watchtower Bob should have broadcast a commitment/HTLC-timeout
let bob_state_y;
};
// We confirm Bob's state Y on Alice; she should broadcast an HTLC-timeout
- let header = BlockHeader { version: 0x20000000, prev_blockhash: BlockHash::all_zeros(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
let height = HTLC_TIMEOUT_BROADCAST + 1;
connect_blocks(&nodes[0], height - nodes[0].best_block_info().1);
check_closed_broadcast(&nodes[0], 1, true);
check_closed_event(&nodes[0], 1, ClosureReason::CommitmentTxConfirmed, false);
- watchtower_alice.chain_monitor.block_connected(&Block { header, txdata: vec![bob_state_y.clone()] }, height);
+ watchtower_alice.chain_monitor.block_connected(&create_dummy_block(BlockHash::all_zeros(), 42, vec![bob_state_y.clone()]), height);
check_added_monitors(&nodes[0], 1);
{
let htlc_txn = alice_broadcaster.txn_broadcast();
check_spends!(local_txn[0], chan_1.3);
// Time out the HTLC on A's chain so it can generate an HTLC-Timeout tx
- let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[0].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
- connect_block(&nodes[0], &Block { header, txdata: vec![local_txn[0].clone()] });
+ let block = create_dummy_block(nodes[0].best_block_hash(), 42, vec![local_txn[0].clone()]);
+ connect_block(&nodes[0], &block);
// We deliberately connect the local tx twice, as doing so provoked a failure when
// running this test before the #653 fix.
- chain::Listen::block_connected(&nodes[0].chain_monitor.chain_monitor, &Block { header, txdata: vec![local_txn[0].clone()] }, nodes[0].best_block_info().1 + 1);
+ chain::Listen::block_connected(&nodes[0].chain_monitor.chain_monitor, &block, nodes[0].best_block_info().1 + 1);
check_closed_broadcast!(nodes[0], true);
check_added_monitors!(nodes[0], 1);
check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
node_txn[0].clone()
};
- let header_201 = BlockHeader { version: 0x20000000, prev_blockhash: nodes[0].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
- connect_block(&nodes[0], &Block { header: header_201, txdata: vec![htlc_timeout.clone()] });
+ connect_block(&nodes[0], &create_dummy_block(nodes[0].best_block_hash(), 42, vec![htlc_timeout.clone()]));
connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
expect_payment_failed!(nodes[0], our_payment_hash, false);
}
true => alice_txn.clone(),
false => get_local_commitment_txn!(nodes[1], chan_ab.2)
};
- let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42};
- connect_block(&nodes[1], &Block { header, txdata: vec![txn_to_broadcast[0].clone()]});
+ connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, vec![txn_to_broadcast[0].clone()]));
if broadcast_alice {
check_closed_broadcast!(nodes[1], true);
check_added_monitors!(nodes[1], 1);
let mut txn_to_broadcast = alice_txn.clone();
if !broadcast_alice { txn_to_broadcast = get_local_commitment_txn!(nodes[1], chan_ab.2); }
if !go_onchain_before_fulfill {
- let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42};
- connect_block(&nodes[1], &Block { header, txdata: vec![txn_to_broadcast[0].clone()]});
+ connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, vec![txn_to_broadcast[0].clone()]));
// If Bob was the one to force-close, he will have already passed these checks earlier.
if broadcast_alice {
check_closed_broadcast!(nodes[1], true);
};
let scorer = test_utils::TestScorer::new();
let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
- let route = find_route(&payer_pubkey, &route_params, &network_graph, None, nodes[0].logger, &scorer, &random_seed_bytes).unwrap();
+ let route = find_route(&payer_pubkey, &route_params, &network_graph, None, nodes[0].logger, &scorer, &(), &random_seed_bytes).unwrap();
let test_preimage = PaymentPreimage([42; 32]);
let payment_hash = nodes[0].node.send_spontaneous_payment(&route, Some(test_preimage),
let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
let route = find_route(
&payer_pubkey, &route_params, &network_graph, Some(&first_hops.iter().collect::<Vec<_>>()),
- nodes[0].logger, &scorer, &random_seed_bytes
+ nodes[0].logger, &scorer, &(), &random_seed_bytes
).unwrap();
let test_preimage = PaymentPreimage([42; 32]);
do_payment_with_custom_min_final_cltv_expiry(true, false);
do_payment_with_custom_min_final_cltv_expiry(true, true);
}
+
+#[test]
+fn test_disconnects_peer_awaiting_response_ticks() {
+ // Tests that nodes which are awaiting a response critical for channel responsiveness
+ // disconnect their counterparty after `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`.
+ let mut chanmon_cfgs = create_chanmon_cfgs(2);
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+ let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+ // Asserts a disconnect event is queued to the user.
+ let check_disconnect_event = |node: &Node, should_disconnect: bool| {
+ let disconnect_event = node.node.get_and_clear_pending_msg_events().iter().find_map(|event|
+ if let MessageSendEvent::HandleError { action, .. } = event {
+ if let msgs::ErrorAction::DisconnectPeerWithWarning { .. } = action {
+ Some(())
+ } else {
+ None
+ }
+ } else {
+ None
+ }
+ );
+ assert_eq!(disconnect_event.is_some(), should_disconnect);
+ };
+
+ // Fires timer ticks ensuring we only attempt to disconnect peers after reaching
+ // `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`.
+ let check_disconnect = |node: &Node| {
+ // No disconnect without any timer ticks.
+ check_disconnect_event(node, false);
+
+ // No disconnect with 1 timer tick less than required.
+ for _ in 0..DISCONNECT_PEER_AWAITING_RESPONSE_TICKS - 1 {
+ node.node.timer_tick_occurred();
+ check_disconnect_event(node, false);
+ }
+
+ // Disconnect after reaching the required ticks.
+ node.node.timer_tick_occurred();
+ check_disconnect_event(node, true);
+
+ // Disconnect again on the next tick if the peer hasn't been disconnected yet.
+ node.node.timer_tick_occurred();
+ check_disconnect_event(node, true);
+ };
+
+ create_chan_between_nodes(&nodes[0], &nodes[1]);
+
+ // We'll start by performing a fee update with Alice (nodes[0]) on the channel.
+ *nodes[0].fee_estimator.sat_per_kw.lock().unwrap() *= 2;
+ nodes[0].node.timer_tick_occurred();
+ check_added_monitors!(&nodes[0], 1);
+ let alice_fee_update = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id());
+ nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), alice_fee_update.update_fee.as_ref().unwrap());
+ nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &alice_fee_update.commitment_signed);
+ check_added_monitors!(&nodes[1], 1);
+
+ // This will prompt Bob (nodes[1]) to respond with his `CommitmentSigned` and `RevokeAndACK`.
+ let (bob_revoke_and_ack, bob_commitment_signed) = get_revoke_commit_msgs!(&nodes[1], nodes[0].node.get_our_node_id());
+ nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bob_revoke_and_ack);
+ check_added_monitors!(&nodes[0], 1);
+ nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bob_commitment_signed);
+ check_added_monitors(&nodes[0], 1);
+
+ // Alice then needs to send her final `RevokeAndACK` to complete the commitment dance. We
+ // pretend Bob hasn't received the message and check whether he'll disconnect Alice after
+ // reaching `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`.
+ let alice_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
+ check_disconnect(&nodes[1]);
+
+ // Now, we'll reconnect them to test awaiting a `ChannelReestablish` message.
+ //
+ // Note that since the commitment dance didn't complete above, Alice is expected to resend her
+ // final `RevokeAndACK` to Bob to complete it.
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
+ let bob_init = msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None };
+ nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &bob_init, true).unwrap();
+ let alice_init = msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None };
+ nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &alice_init, true).unwrap();
+
+ // Upon reconnection, Alice sends her `ChannelReestablish` to Bob. Alice, however, hasn't
+ // received Bob's yet, so she should disconnect him after reaching
+ // `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`.
+ let alice_channel_reestablish = get_event_msg!(
+ nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id()
+ );
+ nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &alice_channel_reestablish);
+ check_disconnect(&nodes[0]);
+
+ // Bob now sends his `ChannelReestablish` to Alice to resume the channel and consider it "live".
+ let bob_channel_reestablish = nodes[1].node.get_and_clear_pending_msg_events().iter().find_map(|event|
+ if let MessageSendEvent::SendChannelReestablish { node_id, msg } = event {
+ assert_eq!(*node_id, nodes[0].node.get_our_node_id());
+ Some(msg.clone())
+ } else {
+ None
+ }
+ ).unwrap();
+ nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bob_channel_reestablish);
+
+ // Sanity check that Alice won't disconnect Bob since she's no longer waiting for any messages.
+ for _ in 0..DISCONNECT_PEER_AWAITING_RESPONSE_TICKS {
+ nodes[0].node.timer_tick_occurred();
+ check_disconnect_event(&nodes[0], false);
+ }
+
+ // However, Bob is still waiting on Alice's `RevokeAndACK`, so he should disconnect her after
+ // reaching `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`.
+ check_disconnect(&nodes[1]);
+
+ // Finally, have Bob process the last message.
+ nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &alice_revoke_and_ack);
+ check_added_monitors(&nodes[1], 1);
+
+ // At this point, neither node should attempt to disconnect each other, since they aren't
+ // waiting on any messages.
+ for node in &nodes {
+ for _ in 0..DISCONNECT_PEER_AWAITING_RESPONSE_TICKS {
+ node.node.timer_tick_occurred();
+ check_disconnect_event(node, false);
+ }
+ }
+}
/// payment_hash type, used to cross-lock hops
///
/// This is not exported to bindings users as we just use [u8; 32] directly
-#[derive(Hash, Copy, Clone, PartialEq, Eq, Debug)]
-#[cfg_attr(test, derive(PartialOrd, Ord))]
+#[derive(Hash, Copy, Clone, PartialEq, Eq, Debug, Ord, PartialOrd)]
pub struct PaymentHash(pub [u8; 32]);
/// payment_preimage type, used to route payments between hops
///
/// This is not exported to bindings users as we just use [u8; 32] directly
-#[derive(Hash, Copy, Clone, PartialEq, Eq, Debug)]
-#[cfg_attr(test, derive(PartialOrd, Ord))]
+#[derive(Hash, Copy, Clone, PartialEq, Eq, Debug, Ord, PartialOrd)]
pub struct PaymentPreimage(pub [u8; 32]);
/// payment_secret type, used to authenticate the sender to the receiver and tie MPP HTLCs together
///
/// This is not exported to bindings users as we just use [u8; 32] directly
-#[derive(Hash, Copy, Clone, PartialEq, Eq, Debug)]
+#[derive(Hash, Copy, Clone, PartialEq, Eq, Debug, Ord, PartialOrd)]
pub struct PaymentSecret(pub [u8; 32]);
use crate::prelude::*;
if let Event::SpendableOutputs { outputs } = spendable.pop().unwrap() {
assert_eq!(outputs.len(), 1);
let spend_tx = node.keys_manager.backing.spend_spendable_outputs(&[&outputs[0]], Vec::new(),
- Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script(), 253, &Secp256k1::new()).unwrap();
+ Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script(), 253, None, &Secp256k1::new()).unwrap();
check_spends!(spend_tx, spendable_tx);
} else { panic!(); }
}
// old `ChannelMonitor` that did not exercise said rebroadcasting logic.
if check_old_monitor_retries_after_upgrade {
let serialized_monitor = hex::decode(
- "0101fffffffffffffffff9550f22c95100160014d5a9aa98b89acc215fc3d23d6fec0ad59ca3665f00002200204c5f18e5e95b184f34d02ba6de8a2a4e36ae3d4ec87299ad81f3284dc7195c6302d7dde8e10a5a22c9bd0d7ef5494d85683ac050253b917615d4f97af633f0a8e2035f5e9d58b4328566223c107d86cf853e6b9fae1d26ff6d969be0178d1423c4ea0016001467822698d782e8421ebdf96d010de99382b7ec2300160014caf6d80fe2bab80473b021f57588a9c384bf23170000000000000000000000004d49e5da0000000000000000000000000000002a0270b20ad0f2c2bb30a55590fc77778495bc1b38c96476901145dda57491237f0f74c52ab4f11296d62b66a6dba9513b04a3e7fb5a09a30cee22fce7294ab55b7e00000022002034c0cc0ad0dd5fe61dcf7ef58f995e3d34f8dbd24aa2a6fae68fefe102bf025c21391732ce658e1fe167300bb689a81e7db5399b9ee4095e217b0e997e8dd3d17a0000000000000000004a002103adde8029d3ee281a32e9db929b39f503ff9d7e93cd308eb157955344dc6def84022103205087e2dc1f6b9937e887dfa712c5bdfa950b01dbda3ebac4c85efdde48ee6a04020090004752210307a78def56cba9fc4db22a25928181de538ee59ba1a475ae113af7790acd0db32103c21e841cbc0b48197d060c71e116c185fa0ac281b7d0aa5924f535154437ca3b52ae00000000000186a0ffffffffffff0291e7c0a3232fb8650a6b4089568a81062b48a768780e5a74bb4a4a74e33aec2c029d5760248ec86c4a76d9df8308555785a06a65472fb995f5b392d520bbd000650090c1c94b11625690c9d84c5daa67b6ad19fcc7f9f23e194384140b08fcab9e8e810000ffffffffffff00000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000
0000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000002391732ce658e1fe167300bb689a81e7db5399b9ee4095e217b0e997e8dd3d17a00000000000000010000000000009896800000005166687aadf862bd776c8fc18b8e9f8e20089714856ee233b3902a591d0d5f292505000000009c009900202d704fbfe342a9ff6eaca14d80a24aaed0e680bbbdd36157b6f2798c61d906910120f9fe5e552aa0fc45020f0505efde432a4e373e5d393863973a6899f8c26d33d10208000000000098968004494800210355f8d2238a322d16b602bd0ceaad5b01019fb055971eaadcc9b29226a4da6c23020500030241000408000001000000000006020000080800000000009896800a0400000046167c86cc0e598a6b541f7c9bf9ef17222e4a76f636e2d22185aeadd2b02d029c00000000000000000000000000000000000000000000000166687aadf862bd776c8fc18b8e9f8e20089714856ee233b3902a591d0d5f2925fffffffffffe01e3002004f8eda5676356f539169a8e9a1e86c7f125283328d6f4bded1b939b52a6a7e30108000000000000c299022103a1f98e85886df54add6908b4fc1ff515e44aedefe9eb9c02879c89994298fa79042103a650bf03971df0176c7b412247390ef717853e8bd487b204dccc2fe2078bb75206210390bbbcebe9f70ba5dfd98866a79f72f75e0a6ea550ef73b202dd87cd6477350a08210284152d57908488e666e872716a286eb670b3d06cbeebf3f2e4ad350e01ec5e5b0a2102295e2de39eb3dcc2882f8cc266df7882a8b6d2c32aa08799f49b693aad3be28e0c04000000fd0e00fd01fe002045cfd42d0989e55b953f516ac7fd152bd90ec4438a2fc636f97ddd32a0c8fe0d01080000000000009b5e0221035f5e9d58b4328566223c107d86cf853e6b9fae1d26ff6d969be0178d1423c4ea04210230fde9c031f487db95ff55b7c0acbe0c7c26a8d82615e9184416bd350101616706210225afb4e88eac8b47b67adeaf085f5eb5d37d936f56138f0848de3d104edf113208210208e4687a95c172b86b920c3bc5dbd5f023094ec2cb0abdb74f9b624f45740df90a2102d7dde8e10a5a22c9bd0d7ef5494d85683ac050253b917615d4f97af633f0a8e20c04000000fd0efd01193b00010102080000000000989680040400000051062066687aadf862bd776c8fc18b8e9f8e20089714856ee233b3902a591d0d5f2925080400000000417e2650c201383711eed2a7cb8652c3e77ee6a395e81849c5c222217ed68b333c0ca9f1e900662ae68a7359efa7ef9d90613f2a62f7c3ff90f8c25e2cc974c9d39c009900202d704fbfe342a9ff6eaca14d80a24aaed0e680bbbdd36157b6f2798c61d906910120f9fe5e552aa0fc45020f0505efde432a4e373e5d393
863973a6899f8c26d33d10208000000000098968004494800210355f8d2238a322d16b602bd0ceaad5b01019fb055971eaadcc9b29226a4da6c23020500030241000408000001000000000006020000080800000000009896800a0400000046fffffffffffefffffffffffe000000000000000000000000000000000000000000000000ffe099e83ae3761c7f1b781d22613bd1f6977e9ad59fae12b3eba34462ee8a3d000000500000000000000002fd01da002045cfd42d0989e55b953f516ac7fd152bd90ec4438a2fc636f97ddd32a0c8fe0d01fd01840200000000010174c52ab4f11296d62b66a6dba9513b04a3e7fb5a09a30cee22fce7294ab55b7e00000000000f55f9800310270000000000002200208309b406e3b96e76cde414fbb8f5159f5b25b24075656c6382cec797854d53495e9b0000000000002200204c5f18e5e95b184f34d02ba6de8a2a4e36ae3d4ec87299ad81f3284dc7195c6350c300000000000016001425df8ec4a074f80579fed67d4707d5ec8ed7e8d304004730440220671c9badf26bd3a1ebd2d17020c6be20587d7822530daacc52c28839875eaec602204b575a21729ed27311f6d79fdf6fe8702b0a798f7d842e39ede1b56f249a613401473044022016a0da36f70cbf5d889586af88f238982889dc161462c56557125c7acfcb69e9022036ae10c6cc8cbc3b27d9e9ef6babb556086585bc819f252208bd175286699fdd014752210307a78def56cba9fc4db22a25928181de538ee59ba1a475ae113af7790acd0db32103c21e841cbc0b48197d060c71e116c185fa0ac281b7d0aa5924f535154437ca3b52ae50c9222002040000000b03209452ca8c90d4c78928b80ec41398f2a890324d8ad6e6c81408a0cb9b8d977b070406030400020090fd02a1002045cfd42d0989e55b953f516ac7fd152bd90ec4438a2fc636f97ddd32a0c8fe0d01fd01840200000000010174c52ab4f11296d62b66a6dba9513b04a3e7fb5a09a30cee22fce7294ab55b7e00000000000f55f9800310270000000000002200208309b406e3b96e76cde414fbb8f5159f5b25b24075656c6382cec797854d53495e9b0000000000002200204c5f18e5e95b184f34d02ba6de8a2a4e36ae3d4ec87299ad81f3284dc7195c6350c300000000000016001425df8ec4a074f80579fed67d4707d5ec8ed7e8d304004730440220671c9badf26bd3a1ebd2d17020c6be20587d7822530daacc52c28839875eaec602204b575a21729ed27311f6d79fdf6fe8702b0a798f7d842e39ede1b56f249a613401473044022016a0da36f70cbf5d889586af88f238982889dc161462c56557125c7acfcb69e9022036ae10c6cc8cbc3b27d9e9ef6babb556086585bc819f252208bd175286699fdd014752210307a78def56cba9fc4db22a25928181de538ee59ba1a475ae113af7790acd0db32103c21e841cbc0b48197d060c71e116c185fa0ac281b7d0aa5924f535154437ca3b52ae50c9222002040000000b03209452ca8c90d4c78928b80ec41398f2a890324d8ad6e6c81408a0cb9b8d977b0704cd01cb00c901c7002245cfd42d0989e55b953f516ac7fd152bd90ec4438a2fc636f97ddd32a0c8fe0d0001022102d7dde8e10a5a22c9bd0d7ef5494d85683ac050253b917615d4f97af633f0a8e204020090062b5e9b0000000000002200204c5f18e5e95b184f34d02ba6de8a2a4e36ae3d4ec87299ad81f3284dc7195c630821035f5e9d58b4328566223c107d86cf853e6b9fae1d26ff6d969be0178d1423c4ea0a200000000000000000000000004d49e5da0000000000000000000000000000002a0c0800000000000186a0000000000000000274c52ab4f11296d62b66a6dba9513b04a3e7fb5a09a30cee22fce7294ab55b7e0000000000000001000000000022002034c0cc0ad0dd5fe61dcf7ef58f995e3d34f8dbd24aa2a6fae68fefe102bf025c45cfd42d0989e55b953f516ac7fd152bd90ec4438a2fc636f97ddd32a0c8fe0d000000000000000100000000002200208309b406e3b96e76cde414fbb8f5159f5b25b24075656c6382cec797854d5349010100160014d5a9aa98b89acc215fc3d23d6fec0ad59ca3665ffd027100fd01e6fd01e300080000fffffffffffe02080000000000009b5e0408000000000000c3500604000000fd08b0af002102d7dde8e10a5a22c9bd0d7ef5494d85683ac050253b917615d4f97af633f0a8e20221035f5e9d58b4328566223c107d86cf853e6b9fae1d26ff6d969be0178d1423c4ea04210230fde9c031f487db95ff55b7c0acbe0c7c26a8d82615e9184416bd350101616706210225afb4e88eac8b47b67adeaf085f5eb5d37d936f56138f0848de3d104edf113208210208e4687a95c172b86b920c3bc5dbd5f023094ec2cb0abdb74f9b624f45740df90acdcc00a8020000000174c52ab4f11296d62b66a6dba9513b04a3e7fb5a09
a30cee22fce7294ab55b7e00000000000f55f9800310270000000000002200208309b406e3b96e76cde414fbb8f5159f5b25b24075656c6382cec797854d53495e9b0000000000002200204c5f18e5e95b184f34d02ba6de8a2a4e36ae3d4ec87299ad81f3284dc7195c6350c300000000000016001425df8ec4a074f80579fed67d4707d5ec8ed7e8d350c92220022045cfd42d0989e55b953f516ac7fd152bd90ec4438a2fc636f97ddd32a0c8fe0d0c3c3b00010102080000000000989680040400000051062066687aadf862bd776c8fc18b8e9f8e20089714856ee233b3902a591d0d5f29250804000000000240671c9badf26bd3a1ebd2d17020c6be20587d7822530daacc52c28839875eaec64b575a21729ed27311f6d79fdf6fe8702b0a798f7d842e39ede1b56f249a613404010006407e2650c201383711eed2a7cb8652c3e77ee6a395e81849c5c222217ed68b333c0ca9f1e900662ae68a7359efa7ef9d90613f2a62f7c3ff90f8c25e2cc974c9d3010000000000000001010000000000000000090b2a953d93a124c600ecb1a0ccfed420169cdd37f538ad94a3e4e6318c93c14adf59cdfbb40bdd40950c9f8dd547d29d75a173e1376a7850743394c46dea2dfd01cefd01ca00fd017ffd017c00080000ffffffffffff0208000000000000c2990408000000000000c3500604000000fd08b0af002102295e2de39eb3dcc2882f8cc266df7882a8b6d2c32aa08799f49b693aad3be28e022103a1f98e85886df54add6908b4fc1ff515e44aedefe9eb9c02879c89994298fa79042103a650bf03971df0176c7b412247390ef717853e8bd487b204dccc2fe2078bb75206210390bbbcebe9f70ba5dfd98866a79f72f75e0a6ea550ef73b202dd87cd6477350a08210284152d57908488e666e872716a286eb670b3d06cbeebf3f2e4ad350e01ec5e5b0aa2a1007d020000000174c52ab4f11296d62b66a6dba9513b04a3e7fb5a09a30cee22fce7294ab55b7e00000000000f55f9800299c2000000000000220020740e108cfbc93967b6ab242a351ebee7de51814cf78d366adefd78b10281f17e50c300000000000016001425df8ec4a074f80579fed67d4707d5ec8ed7e8d351c92220022004f8eda5676356f539169a8e9a1e86c7f125283328d6f4bded1b939b52a6a7e30c00024045cb2485594bb1ec08e7bb6af4f89c912bd53f006d7876ea956773e04a4aad4a40e2b8d4fc612102f0b54061b3c1239fb78783053e8e6f9d92b1b99f81ae9ec2040100060000fd019600b0af002103c21e841cbc0b48197d060c71e116c185fa0ac281b7d0aa5924f535154437ca3b02210270b20ad0f2c2bb30a55590fc77778495bc1b38c96476901145dda57491237f0f042103b4e59df102747edc3a3e2283b42b88a8c8218ffd0dcfb52f2524b371d64cadaa062103d902b7b8b3434076d2b210e912c76645048b71e28995aad227a465a65ccd817608210301e9a52f923c157941de4a7692e601f758660969dcf5abdb67817efe84cce2ef0202009004010106b7b600b0af00210307a78def56cba9fc4db22a25928181de538ee59ba1a475ae113af7790acd0db30221034d0f817cb19b4a3bd144b615459bd06cbab3b4bdc96d73e18549a992cee80e8104210380542b59a9679890cba529fe155a9508ef57dac7416d035b23666e3fb98c3814062103adde8029d3ee281a32e9db929b39f503ff9d7e93cd308eb157955344dc6def84082103205087e2dc1f6b9937e887dfa712c5bdfa950b01dbda3ebac4c85efdde48ee6a02020090082274c52ab4f11296d62b66a6dba9513b04a3e7fb5a09a30cee22fce7294ab55b7e000000000287010108d30df34e3a1e00ecdd03a2c843db062479a81752c4dfd0cc4baef0f81e7bc7ef8820990daf8d8e8d30a3b4b08af12c9f5cd71e45c7238103e0c80ca13850862e4fd2c56b69b7195312518de1bfe9aed63c80bb7760d70b2a870d542d815895fd12423d11e2adb0cdf55d776dac8f487c9b3b7ea12f1b150eb15889cf41333ade465692bf1cdc360b9c2a19bf8c1ca4fed7639d8bc953d36c10d8c6c9a8c0a57608788979bcf145e61b308006896e21d03e92084f93bd78740c20639134a7a8fd019afd019600b0af002103c21e841cbc0b48197d060c71e116c185fa0ac281b7d0aa5924f535154437ca3b02210270b20ad0f2c2bb30a55590fc77778495bc1b38c96476901145dda57491237f0f042103b4e59df102747edc3a3e2283b42b88a8c8218ffd0dcfb52f2524b371d64cadaa062103d902b7b8b3434076d2b210e912c76645048b71e28995aad227a465a65ccd817608210301e9a52f923c157941de4a7692e601f758660969dcf5abdb67817efe84cce2ef0202009004010106b7b600b0af00210307a78def56cba9fc4db22a25928181de538ee59ba1a475ae113af7790acd0db30221034d0f817cb19b4a3bd144b615459bd06
cbab3b4bdc96d73e18549a992cee80e8104210380542b59a9679890cba529fe155a9508ef57dac7416d035b23666e3fb98c3814062103adde8029d3ee281a32e9db929b39f503ff9d7e93cd308eb157955344dc6def84082103205087e2dc1f6b9937e887dfa712c5bdfa950b01dbda3ebac4c85efdde48ee6a02020090082274c52ab4f11296d62b66a6dba9513b04a3e7fb5a09a30cee22fce7294ab55b7e000000000000000186a00000000000000000000000004d49e5da0000000000000000000000000000002a000000000000000001b77b61346a2a408afdb01743a2230cb36e55771a0790f67a0910e207fd223fc8000000000000000145cfd42d0989e55b953f516ac7fd152bd90ec4438a2fc636f97ddd32a0c8fe0d00000000041000080000000000989680020400000051160004000000510208000000000000000004040000000b000000000000000145cfd42d0989e55b953f516ac7fd152bd90ec4438a2fc636f97ddd32a0c8fe0d00000000b77b61346a2a408afdb01743a2230cb36e55771a0790f67a0910e207fd223fc80000005000000000000000000000000000000000000101300300050007010109210355f8d2238a322d16b602bd0ceaad5b01019fb055971eaadcc9b29226a4da6c230d000f020000",
+ "0101fffffffffffffffff9550f22c95100160014d5a9aa98b89acc215fc3d23d6fec0ad59ca3665f00002200204c5f18e5e95b184f34d02ba6de8a2a4e36ae3d4ec87299ad81f3284dc7195c6302d7dde8e10a5a22c9bd0d7ef5494d85683ac050253b917615d4f97af633f0a8e2035f5e9d58b4328566223c107d86cf853e6b9fae1d26ff6d969be0178d1423c4ea0016001467822698d782e8421ebdf96d010de99382b7ec2300160014caf6d80fe2bab80473b021f57588a9c384bf23170000000000000000000000004d49e5da0000000000000000000000000000002a0270b20ad0f2c2bb30a55590fc77778495bc1b38c96476901145dda57491237f0f74c52ab4f11296d62b66a6dba9513b04a3e7fb5a09a30cee22fce7294ab55b7e00000022002034c0cc0ad0dd5fe61dcf7ef58f995e3d34f8dbd24aa2a6fae68fefe102bf025c21391732ce658e1fe167300bb689a81e7db5399b9ee4095e217b0e997e8dd3d17a0000000000000000004a002103adde8029d3ee281a32e9db929b39f503ff9d7e93cd308eb157955344dc6def84022103205087e2dc1f6b9937e887dfa712c5bdfa950b01dbda3ebac4c85efdde48ee6a04020090004752210307a78def56cba9fc4db22a25928181de538ee59ba1a475ae113af7790acd0db32103c21e841cbc0b48197d060c71e116c185fa0ac281b7d0aa5924f535154437ca3b52ae00000000000186a0ffffffffffff0291e7c0a3232fb8650a6b4089568a81062b48a768780e5a74bb4a4a74e33aec2c029d5760248ec86c4a76d9df8308555785a06a65472fb995f5b392d520bbd000650090c1c94b11625690c9d84c5daa67b6ad19fcc7f9f23e194384140b08fcab9e8e810000ffffffffffff00000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000
0000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000002167c86cc0e598a6b541f7c9bf9ef17222e4a76f636e2d22185aeadd2b02d029c0000000000000000391732ce658e1fe167300bb689a81e7db5399b9ee4095e217b0e997e8dd3d17a00000000000000010000000000009896800000005166687aadf862bd776c8fc18b8e9f8e20089714856ee233b3902a591d0d5f29250500000000a0009d00202d704fbfe342a9ff6eaca14d80a24aaed0e680bbbdd36157b6f2798c61d906910120f9fe5e552aa0fc45020f0505efde432a4e373e5d393863973a6899f8c26d33d102080000000000989680044d4c00210355f8d2238a322d16b602bd0ceaad5b01019fb055971eaadcc9b29226a4da6c2302090007000000000241000408000001000000000006020000080800000000009896800a04000000460000000000000000000000000000000166687aadf862bd776c8fc18b8e9f8e20089714856ee233b3902a591d0d5f2925fffffffffffe01e3002004f8eda5676356f539169a8e9a1e86c7f125283328d6f4bded1b939b52a6a7e30108000000000000c299022103a1f98e85886df54add6908b4fc1ff515e44aedefe9eb9c02879c89994298fa79042103a650bf03971df0176c7b412247390ef717853e8bd487b204dccc2fe2078bb75206210390bbbcebe9f70ba5dfd98866a79f72f75e0a6ea550ef73b202dd87cd6477350a08210284152d57908488e666e872716a286eb670b3d06cbeebf3f2e4ad350e01ec5e5b0a2102295e2de39eb3dcc2882f8cc266df7882a8b6d2c32aa08799f49b693aad3be28e0c04000000fd0e00fd0202002045cfd42d0989e55b953f516ac7fd152bd90ec4438a2fc636f97ddd32a0c8fe0d01080000000000009b5e0221035f5e9d58b4328566223c107d86cf853e6b9fae1d26ff6d969be0178d1423c4ea04210230fde9c031f487db95ff55b7c0acbe0c7c26a8d82615e9184416bd350101616706210225afb4e88eac8b47b67adeaf085f5eb5d37d936f56138f0848de3d104edf113208210208e4687a95c172b86b920c3bc5dbd5f023094ec2cb0abdb74f9b624f45740df90a2102d7dde8e10a5a22c9bd0d7ef5494d85683ac050253b917615d4f97af633f0a8e20c04000000fd0efd011d3b00010102080000000000989680040400000051062066687aadf862bd776c8fc18b8e9f8e20089714856ee233b3902a591d0d5f2925080400000000417e2650c201383711eed2a7cb8652c3e77ee6a395e81849c5c222217ed68b333c0ca9f1e900662ae68a7359efa7ef9d90613f2a62f7c3ff90f8c25e2cc974c9d3a0009d00202d704fbfe342a9ff6eaca14d80a24aaed0e680bbbdd36157b6f2798c61d906910120f9fe5e552aa0fc45020f0505efde432a4e3
73e5d393863973a6899f8c26d33d102080000000000989680044d4c00210355f8d2238a322d16b602bd0ceaad5b01019fb055971eaadcc9b29226a4da6c2302090007000000000241000408000001000000000006020000080800000000009896800a0400000046fffffffffffefffffffffffe000000000000000000000000000000000000000000000000f1600ef6ea657b8d411d553516ae35cedfe86b0cd48d1f91b32772facbae757d0000000b0000000000000002fd01da002045cfd42d0989e55b953f516ac7fd152bd90ec4438a2fc636f97ddd32a0c8fe0d01fd01840200000000010174c52ab4f11296d62b66a6dba9513b04a3e7fb5a09a30cee22fce7294ab55b7e00000000000f55f9800310270000000000002200208309b406e3b96e76cde414fbb8f5159f5b25b24075656c6382cec797854d53495e9b0000000000002200204c5f18e5e95b184f34d02ba6de8a2a4e36ae3d4ec87299ad81f3284dc7195c6350c300000000000016001425df8ec4a074f80579fed67d4707d5ec8ed7e8d304004730440220671c9badf26bd3a1ebd2d17020c6be20587d7822530daacc52c28839875eaec602204b575a21729ed27311f6d79fdf6fe8702b0a798f7d842e39ede1b56f249a613401473044022016a0da36f70cbf5d889586af88f238982889dc161462c56557125c7acfcb69e9022036ae10c6cc8cbc3b27d9e9ef6babb556086585bc819f252208bd175286699fdd014752210307a78def56cba9fc4db22a25928181de538ee59ba1a475ae113af7790acd0db32103c21e841cbc0b48197d060c71e116c185fa0ac281b7d0aa5924f535154437ca3b52ae50c9222002040000000b0320f1600ef6ea657b8d411d553516ae35cedfe86b0cd48d1f91b32772facbae757d0406030400020090fd02a1002045cfd42d0989e55b953f516ac7fd152bd90ec4438a2fc636f97ddd32a0c8fe0d01fd01840200000000010174c52ab4f11296d62b66a6dba9513b04a3e7fb5a09a30cee22fce7294ab55b7e00000000000f55f9800310270000000000002200208309b406e3b96e76cde414fbb8f5159f5b25b24075656c6382cec797854d53495e9b0000000000002200204c5f18e5e95b184f34d02ba6de8a2a4e36ae3d4ec87299ad81f3284dc7195c6350c300000000000016001425df8ec4a074f80579fed67d4707d5ec8ed7e8d304004730440220671c9badf26bd3a1ebd2d17020c6be20587d7822530daacc52c28839875eaec602204b575a21729ed27311f6d79fdf6fe8702b0a798f7d842e39ede1b56f249a613401473044022016a0da36f70cbf5d889586af88f238982889dc161462c56557125c7acfcb69e9022036ae10c6cc8cbc3b27d9e9ef6babb556086585bc819f252208bd175286699fdd014752210307a78def56cba9fc4db22a25928181de538ee59ba1a475ae113af7790acd0db32103c21e841cbc0b48197d060c71e116c185fa0ac281b7d0aa5924f535154437ca3b52ae50c9222002040000000b0320f1600ef6ea657b8d411d553516ae35cedfe86b0cd48d1f91b32772facbae757d04cd01cb00c901c7002245cfd42d0989e55b953f516ac7fd152bd90ec4438a2fc636f97ddd32a0c8fe0d0001022102d7dde8e10a5a22c9bd0d7ef5494d85683ac050253b917615d4f97af633f0a8e204020090062b5e9b0000000000002200204c5f18e5e95b184f34d02ba6de8a2a4e36ae3d4ec87299ad81f3284dc7195c630821035f5e9d58b4328566223c107d86cf853e6b9fae1d26ff6d969be0178d1423c4ea0a200000000000000000000000004d49e5da0000000000000000000000000000002a0c0800000000000186a0000000000000000274c52ab4f11296d62b66a6dba9513b04a3e7fb5a09a30cee22fce7294ab55b7e0000000000000001000000000022002034c0cc0ad0dd5fe61dcf7ef58f995e3d34f8dbd24aa2a6fae68fefe102bf025c45cfd42d0989e55b953f516ac7fd152bd90ec4438a2fc636f97ddd32a0c8fe0d000000000000000100000000002200208309b406e3b96e76cde414fbb8f5159f5b25b24075656c6382cec797854d5349010100160014d5a9aa98b89acc215fc3d23d6fec0ad59ca3665ffd027100fd01e6fd01e300080000fffffffffffe02080000000000009b5e0408000000000000c3500604000000fd08b0af002102d7dde8e10a5a22c9bd0d7ef5494d85683ac050253b917615d4f97af633f0a8e20221035f5e9d58b4328566223c107d86cf853e6b9fae1d26ff6d969be0178d1423c4ea04210230fde9c031f487db95ff55b7c0acbe0c7c26a8d82615e9184416bd350101616706210225afb4e88eac8b47b67adeaf085f5eb5d37d936f56138f0848de3d104edf113208210208e4687a95c172b86b920c3bc5dbd5f023094ec2cb0abdb74f9b624f45740df90acdcc00a8020000000174c52ab4f11296d62b66a6dba9
513b04a3e7fb5a09a30cee22fce7294ab55b7e00000000000f55f9800310270000000000002200208309b406e3b96e76cde414fbb8f5159f5b25b24075656c6382cec797854d53495e9b0000000000002200204c5f18e5e95b184f34d02ba6de8a2a4e36ae3d4ec87299ad81f3284dc7195c6350c300000000000016001425df8ec4a074f80579fed67d4707d5ec8ed7e8d350c92220022045cfd42d0989e55b953f516ac7fd152bd90ec4438a2fc636f97ddd32a0c8fe0d0c3c3b00010102080000000000989680040400000051062066687aadf862bd776c8fc18b8e9f8e20089714856ee233b3902a591d0d5f29250804000000000240671c9badf26bd3a1ebd2d17020c6be20587d7822530daacc52c28839875eaec64b575a21729ed27311f6d79fdf6fe8702b0a798f7d842e39ede1b56f249a613404010006407e2650c201383711eed2a7cb8652c3e77ee6a395e81849c5c222217ed68b333c0ca9f1e900662ae68a7359efa7ef9d90613f2a62f7c3ff90f8c25e2cc974c9d3010000000000000001010000000000000000090b2a953d93a124c600ecb1a0ccfed420169cdd37f538ad94a3e4e6318c93c14adf59cdfbb40bdd40950c9f8dd547d29d75a173e1376a7850743394c46dea2dfd01cefd01ca00fd017ffd017c00080000ffffffffffff0208000000000000c2990408000000000000c3500604000000fd08b0af002102295e2de39eb3dcc2882f8cc266df7882a8b6d2c32aa08799f49b693aad3be28e022103a1f98e85886df54add6908b4fc1ff515e44aedefe9eb9c02879c89994298fa79042103a650bf03971df0176c7b412247390ef717853e8bd487b204dccc2fe2078bb75206210390bbbcebe9f70ba5dfd98866a79f72f75e0a6ea550ef73b202dd87cd6477350a08210284152d57908488e666e872716a286eb670b3d06cbeebf3f2e4ad350e01ec5e5b0aa2a1007d020000000174c52ab4f11296d62b66a6dba9513b04a3e7fb5a09a30cee22fce7294ab55b7e00000000000f55f9800299c2000000000000220020740e108cfbc93967b6ab242a351ebee7de51814cf78d366adefd78b10281f17e50c300000000000016001425df8ec4a074f80579fed67d4707d5ec8ed7e8d351c92220022004f8eda5676356f539169a8e9a1e86c7f125283328d6f4bded1b939b52a6a7e30c00024045cb2485594bb1ec08e7bb6af4f89c912bd53f006d7876ea956773e04a4aad4a40e2b8d4fc612102f0b54061b3c1239fb78783053e8e6f9d92b1b99f81ae9ec2040100060000fd019600b0af002103c21e841cbc0b48197d060c71e116c185fa0ac281b7d0aa5924f535154437ca3b02210270b20ad0f2c2bb30a55590fc77778495bc1b38c96476901145dda57491237f0f042103b4e59df102747edc3a3e2283b42b88a8c8218ffd0dcfb52f2524b371d64cadaa062103d902b7b8b3434076d2b210e912c76645048b71e28995aad227a465a65ccd817608210301e9a52f923c157941de4a7692e601f758660969dcf5abdb67817efe84cce2ef0202009004010106b7b600b0af00210307a78def56cba9fc4db22a25928181de538ee59ba1a475ae113af7790acd0db30221034d0f817cb19b4a3bd144b615459bd06cbab3b4bdc96d73e18549a992cee80e8104210380542b59a9679890cba529fe155a9508ef57dac7416d035b23666e3fb98c3814062103adde8029d3ee281a32e9db929b39f503ff9d7e93cd308eb157955344dc6def84082103205087e2dc1f6b9937e887dfa712c5bdfa950b01dbda3ebac4c85efdde48ee6a02020090082274c52ab4f11296d62b66a6dba9513b04a3e7fb5a09a30cee22fce7294ab55b7e000000000287010108d30df34e3a1e00ecdd03a2c843db062479a81752c4dfd0cc4baef0f81e7bc7ef8820990daf8d8e8d30a3b4b08af12c9f5cd71e45c7238103e0c80ca13850862e4fd2c56b69b7195312518de1bfe9aed63c80bb7760d70b2a870d542d815895fd12423d11e2adb0cdf55d776dac8f487c9b3b7ea12f1b150eb15889cf41333ade465692bf1cdc360b9c2a19bf8c1ca4fed7639d8bc953d36c10d8c6c9a8c0a57608788979bcf145e61b308006896e21d03e92084f93bd78740c20639134a7a8fd019afd019600b0af002103c21e841cbc0b48197d060c71e116c185fa0ac281b7d0aa5924f535154437ca3b02210270b20ad0f2c2bb30a55590fc77778495bc1b38c96476901145dda57491237f0f042103b4e59df102747edc3a3e2283b42b88a8c8218ffd0dcfb52f2524b371d64cadaa062103d902b7b8b3434076d2b210e912c76645048b71e28995aad227a465a65ccd817608210301e9a52f923c157941de4a7692e601f758660969dcf5abdb67817efe84cce2ef0202009004010106b7b600b0af00210307a78def56cba9fc4db22a25928181de538ee59ba1a475ae113af7790acd0db30221034d0f817cb19b4a3
bd144b615459bd06cbab3b4bdc96d73e18549a992cee80e8104210380542b59a9679890cba529fe155a9508ef57dac7416d035b23666e3fb98c3814062103adde8029d3ee281a32e9db929b39f503ff9d7e93cd308eb157955344dc6def84082103205087e2dc1f6b9937e887dfa712c5bdfa950b01dbda3ebac4c85efdde48ee6a02020090082274c52ab4f11296d62b66a6dba9513b04a3e7fb5a09a30cee22fce7294ab55b7e000000000000000186a00000000000000000000000004d49e5da0000000000000000000000000000002a00000000000000000000000000000000000000000000000001000000510000000000000001000000000000000145cfd42d0989e55b953f516ac7fd152bd90ec4438a2fc636f97ddd32a0c8fe0d00000000041000080000000000989680020400000051160004000000510208000000000000000004040000000b0000000000000000000101300300050007010109210355f8d2238a322d16b602bd0ceaad5b01019fb055971eaadcc9b29226a4da6c230d000f020000",
).unwrap();
reload_node!(nodes[0], &nodes[0].node.encode(), &[&serialized_monitor], persister, new_chain_monitor, node_deserialized);
}
// revoked outputs.
{
let txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
- assert_eq!(txn.len(), 2);
+ assert_eq!(txn.len(), 4);
- let (revoked_claim_a, revoked_claim_b) = if txn[0].input[0].previous_output.txid == revoked_commitment_a.txid() {
- (&txn[0], &txn[1])
+ let (revoked_htlc_claim_a, revoked_htlc_claim_b) = if txn[0].input[0].previous_output.txid == revoked_commitment_a.txid() {
+ (if txn[0].input.len() == 2 { &txn[0] } else { &txn[1] }, if txn[2].input.len() == 2 { &txn[2] } else { &txn[3] })
} else {
- (&txn[1], &txn[0])
+ (if txn[2].input.len() == 2 { &txn[2] } else { &txn[3] }, if txn[0].input.len() == 2 { &txn[0] } else { &txn[1] })
};
- // TODO: to_self claim must be separate from HTLC claims
- assert_eq!(revoked_claim_a.input.len(), 3); // Spends both HTLC outputs and to_self output
- assert_eq!(revoked_claim_a.output.len(), 1);
- check_spends!(revoked_claim_a, revoked_commitment_a);
- assert_eq!(revoked_claim_b.input.len(), 3); // Spends both HTLC outputs and to_self output
- assert_eq!(revoked_claim_b.output.len(), 1);
- check_spends!(revoked_claim_b, revoked_commitment_b);
+ assert_eq!(revoked_htlc_claim_a.input.len(), 2); // Spends both HTLC outputs
+ assert_eq!(revoked_htlc_claim_a.output.len(), 1);
+ check_spends!(revoked_htlc_claim_a, revoked_commitment_a);
+ assert_eq!(revoked_htlc_claim_b.input.len(), 2); // Spends both HTLC outputs
+ assert_eq!(revoked_htlc_claim_b.output.len(), 1);
+ check_spends!(revoked_htlc_claim_b, revoked_commitment_b);
}
// Since Bob was able to confirm his revoked commitment, he'll now try to claim the HTLCs on
// the second level instead.
let revoked_claims = {
let txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
- assert_eq!(txn.len(), 4);
-
- let revoked_to_self_claim_a = txn.iter().find(|tx|
- tx.input.len() == 1 &&
- tx.output.len() == 1 &&
- tx.input[0].previous_output.txid == revoked_commitment_a.txid()
- ).unwrap();
- check_spends!(revoked_to_self_claim_a, revoked_commitment_a);
-
- let revoked_to_self_claim_b = txn.iter().find(|tx|
- tx.input.len() == 1 &&
- tx.output.len() == 1 &&
- tx.input[0].previous_output.txid == revoked_commitment_b.txid()
- ).unwrap();
- check_spends!(revoked_to_self_claim_b, revoked_commitment_b);
+ assert_eq!(txn.len(), 2);
let revoked_htlc_claims = txn.iter().filter(|tx|
tx.input.len() == 2 &&
assert!(nodes[1].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty());
let spendable_output_events = nodes[0].chain_monitor.chain_monitor.get_and_clear_pending_events();
- assert_eq!(spendable_output_events.len(), 4);
+ assert_eq!(spendable_output_events.len(), 2);
for (idx, event) in spendable_output_events.iter().enumerate() {
if let Event::SpendableOutputs { outputs } = event {
assert_eq!(outputs.len(), 1);
let spend_tx = nodes[0].keys_manager.backing.spend_spendable_outputs(
- &[&outputs[0]], Vec::new(), Script::new_op_return(&[]), 253, &Secp256k1::new(),
+ &[&outputs[0]], Vec::new(), Script::new_op_return(&[]), 253, None, &Secp256k1::new(),
).unwrap();
check_spends!(spend_tx, revoked_claims[idx]);
} else {
assert!(nodes[0].node.list_channels().is_empty());
assert!(nodes[1].node.list_channels().is_empty());
- assert!(nodes[0].chain_monitor.chain_monitor.get_claimable_balances(&[]).is_empty());
+ // On Alice's side, the individual to_self claims are still pending confirmation.
+ assert_eq!(nodes[0].chain_monitor.chain_monitor.get_claimable_balances(&[]).len(), 2);
// TODO: From Bob's PoV, he still thinks he can claim the outputs from his revoked commitment.
// This needs to be fixed before we enable pruning `ChannelMonitor`s once they don't have any
// balances to claim.
/// An error message which we should make an effort to send before we disconnect.
msg: Option<ErrorMessage>
},
+ /// The peer did something incorrect. Tell them without closing any channels and disconnect them.
+ DisconnectPeerWithWarning {
+ /// A warning message which we should make an effort to send before we disconnect.
+ msg: WarningMessage,
+ },
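For context, a handler might surface the new action roughly like this (an illustrative construction, not taken from the patch; `WarningMessage` carries a `channel_id` and a human-readable `data` string):

let action = msgs::ErrorAction::DisconnectPeerWithWarning {
	msg: msgs::WarningMessage {
		channel_id: [0; 32], // all-zeros: not about any specific channel
		data: "Unexpected message for our current channel state".to_owned(),
	},
};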
/// The peer did something harmless that we weren't able to process; just log and ignore it.
// New code should *not* use this. New code must use IgnoreAndLog, below!
IgnoreError,
(3, remote_network_address, option)
});
Ok(Init {
- features: features.or(global_features),
+ features: features | global_features,
remote_network_address,
})
}
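The switch from the `Features::or` helper to the `BitOr` impl keeps the same semantics: the decoded `Init` features are the bitwise union of the message's legacy `globalfeatures` field and its `features` field. A small sketch of that union (the specific feature setters here are just examples of the macro-generated API):

let mut features = InitFeatures::empty();
let mut global_features = InitFeatures::empty();
features.set_variable_length_onion_optional();
global_features.set_static_remote_key_optional();
let merged = features | global_features;
// The union advertises every bit set in either operand.
assert!(merged.supports_variable_length_onion());
assert!(merged.supports_static_remote_key());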
// break the first (non-final) hop payload by swapping the realm (0) byte for a byte
// describing a length-1 TLV payload, which is obviously bogus.
new_payloads[0].data[0] = 1;
- msg.onion_routing_packet = onion_utils::construct_onion_packet_with_writable_hopdata(new_payloads, onion_keys, [0; 32], &payment_hash);
+ msg.onion_routing_packet = onion_utils::construct_onion_packet_with_writable_hopdata(new_payloads, onion_keys, [0; 32], &payment_hash).unwrap();
}, ||{}, true, Some(PERM|22), Some(NetworkUpdate::ChannelFailure{short_channel_id, is_permanent: true}), Some(short_channel_id));
// final node failure
// break the last-hop payload by swapping the realm (0) byte for a byte describing a
// length-1 TLV payload, which is obviously bogus.
new_payloads[1].data[0] = 1;
- msg.onion_routing_packet = onion_utils::construct_onion_packet_with_writable_hopdata(new_payloads, onion_keys, [0; 32], &payment_hash);
+ msg.onion_routing_packet = onion_utils::construct_onion_packet_with_writable_hopdata(new_payloads, onion_keys, [0; 32], &payment_hash).unwrap();
}, ||{}, false, Some(PERM|22), Some(NetworkUpdate::ChannelFailure{short_channel_id, is_permanent: true}), Some(short_channel_id));
// the following three with run_onion_failure_test_with_fail_intercept() test only the origin node
let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route.paths[0], &session_priv).unwrap();
let (onion_payloads, _, htlc_cltv) = onion_utils::build_onion_payloads(
&route.paths[0], 40000, RecipientOnionFields::spontaneous_empty(), height, &None).unwrap();
- let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &payment_hash);
+ let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &payment_hash).unwrap();
msg.cltv_expiry = htlc_cltv;
msg.onion_routing_packet = onion_packet;
}, ||{}, true, Some(21), Some(NetworkUpdate::NodeFailure{node_id: route.paths[0].hops[0].pubkey, is_permanent: true}), Some(route.paths[0].hops[0].short_channel_id));
(get_route(
&$nodes[0].node.get_our_node_id(), &payment_params, &network_graph,
Some(&$nodes[0].node.list_usable_channels().iter().collect::<Vec<_>>()),
- $amt, $nodes[0].logger, &scorer, &[0u8; 32]
+ $amt, $nodes[0].logger, &scorer, &(), &[0u8; 32]
).unwrap(), phantom_route_hint.phantom_scid)
}
}}
onion_keys.remove(0);
onion_payloads.remove(0);
- let new_onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &payment_hash);
+ let new_onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &payment_hash).unwrap();
onion_packet.hop_data = new_onion_packet.hop_data;
onion_packet.hmac = new_onion_packet.hmac;
},
}
}
-pub(super) fn route_size_insane(payloads: &Vec<msgs::OnionHopData>) -> bool {
- let mut len = 0;
- for payload in payloads.iter() {
- let mut payload_len = LengthCalculatingWriter(0);
- payload.write(&mut payload_len).expect("Failed to calculate length");
- assert!(payload_len.0 + 32 < ONION_DATA_LEN);
- len += payload_len.0 + 32;
- if len > ONION_DATA_LEN {
- return true;
- }
- }
- false
-}
-
-/// panics if route_size_insane(payloads)
-pub(super) fn construct_onion_packet(payloads: Vec<msgs::OnionHopData>, onion_keys: Vec<OnionKeys>, prng_seed: [u8; 32], associated_data: &PaymentHash) -> msgs::OnionPacket {
+pub(super) fn construct_onion_packet(payloads: Vec<msgs::OnionHopData>, onion_keys: Vec<OnionKeys>, prng_seed: [u8; 32], associated_data: &PaymentHash) -> Result<msgs::OnionPacket, ()> {
let mut packet_data = [0; ONION_DATA_LEN];
let mut chacha = ChaCha20::new(&prng_seed, &[0; 8]);
#[cfg(test)]
/// Used in testing to write bogus `BogusOnionHopData` as well as `RawOnionHopData`, which is
/// otherwise not representable in `msgs::OnionHopData`.
-pub(super) fn construct_onion_packet_with_writable_hopdata<HD: Writeable>(payloads: Vec<HD>, onion_keys: Vec<OnionKeys>, prng_seed: [u8; 32], associated_data: &PaymentHash) -> msgs::OnionPacket {
+pub(super) fn construct_onion_packet_with_writable_hopdata<HD: Writeable>(payloads: Vec<HD>, onion_keys: Vec<OnionKeys>, prng_seed: [u8; 32], associated_data: &PaymentHash) -> Result<msgs::OnionPacket, ()> {
let mut packet_data = [0; ONION_DATA_LEN];
let mut chacha = ChaCha20::new(&prng_seed, &[0; 8]);
payloads.iter().map(|p| p.serialized_length() + 32 /* HMAC */).sum()
}
-/// panics if payloads_serialized_length(payloads) > packet_data_len
pub(crate) fn construct_onion_message_packet<HD: Writeable, P: Packet<Data = Vec<u8>>>(
- payloads: Vec<HD>, onion_keys: Vec<OnionKeys>, prng_seed: [u8; 32], packet_data_len: usize) -> P
+ payloads: Vec<HD>, onion_keys: Vec<OnionKeys>, prng_seed: [u8; 32], packet_data_len: usize) -> Result<P, ()>
{
let mut packet_data = vec![0; packet_data_len];
construct_onion_packet_with_init_noise::<_, _>(payloads, onion_keys, packet_data, None)
}
-/// panics if payloads_serialized_length(payloads) > packet_data.len()
fn construct_onion_packet_with_init_noise<HD: Writeable, P: Packet>(
- mut payloads: Vec<HD>, onion_keys: Vec<OnionKeys>, mut packet_data: P::Data, associated_data: Option<&PaymentHash>) -> P
+ mut payloads: Vec<HD>, onion_keys: Vec<OnionKeys>, mut packet_data: P::Data, associated_data: Option<&PaymentHash>) -> Result<P, ()>
{
let filler = {
let packet_data = packet_data.as_mut();
let mut payload_len = LengthCalculatingWriter(0);
payload.write(&mut payload_len).expect("Failed to calculate length");
pos += payload_len.0 + 32;
- assert!(pos <= packet_data.len());
+ if pos > packet_data.len() {
+ return Err(());
+ }
res.resize(pos, 0u8);
chacha.process_in_place(&mut res);
chacha.process_in_place(packet_data);
if i == 0 {
- let onion_data_len = packet_data.len();
- packet_data[onion_data_len - filler.len()..onion_data_len].copy_from_slice(&filler[..]);
+ let stop_index = packet_data.len();
+ let start_index = stop_index.checked_sub(filler.len()).ok_or(())?;
+ packet_data[start_index..stop_index].copy_from_slice(&filler[..]);
}
let mut hmac = HmacEngine::<Sha256>::new(&keys.mu);
hmac_res = Hmac::from_engine(hmac).into_inner();
}
- P::new(onion_keys.first().unwrap().ephemeral_pubkey, packet_data, hmac_res)
+ Ok(P::new(onion_keys.first().unwrap().ephemeral_pubkey, packet_data, hmac_res))
}
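With these signatures, call sites handle oversize routes as an error rather than relying on a separate pre-check. A minimal caller-side sketch (the wrapper function and its error type are illustrative, not from the patch):

fn build_packet(payloads: Vec<msgs::OnionHopData>, keys: Vec<OnionKeys>,
	prng_seed: [u8; 32], payment_hash: &PaymentHash) -> Result<msgs::OnionPacket, ()> {
	// Err(()) replaces the old route_size_insane() pre-check plus panic: it is
	// returned when the serialized payloads (each plus a 32-byte HMAC) would
	// overflow the fixed ONION_DATA_LEN packet.
	construct_onion_packet(payloads, keys, prng_seed, payment_hash)
}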
/// Encrypts a failure packet. raw_packet can either be a
let pad_keytype_seed = super::gen_pad_from_shared_secret(&get_test_session_key().secret_bytes());
- let packet: msgs::OnionPacket = super::construct_onion_packet_with_writable_hopdata::<_>(payloads, onion_keys, pad_keytype_seed, &PaymentHash([0x42; 32]));
+ let packet: msgs::OnionPacket = super::construct_onion_packet_with_writable_hopdata::<_>(payloads, onion_keys, pad_keytype_seed, &PaymentHash([0x42; 32])).unwrap();
assert_eq!(packet.encode(), hex::decode("0002EEC7245D6B7D2CCB30380BFBE2A3648CD7A942653F5AA340EDCEA1F283686619F7F3416A5AA36DC7EEB3EC6D421E9615471AB870A33AC07FA5D5A51DF0A8823AABE3FEA3F90D387529D4F72837F9E687230371CCD8D263072206DBED0234F6505E21E282ABD8C0E4F5B9FF8042800BBAB065036EADD0149B37F27DDE664725A49866E052E809D2B0198AB9610FAA656BBF4EC516763A59F8F42C171B179166BA38958D4F51B39B3E98706E2D14A2DAFD6A5DF808093ABFCA5AEAACA16EDED5DB7D21FB0294DD1A163EDF0FB445D5C8D7D688D6DD9C541762BF5A5123BF9939D957FE648416E88F1B0928BFA034982B22548E1A4D922690EECF546275AFB233ACF4323974680779F1A964CFE687456035CC0FBA8A5428430B390F0057B6D1FE9A8875BFA89693EEB838CE59F09D207A503EE6F6299C92D6361BC335FCBF9B5CD44747AADCE2CE6069CFDC3D671DAEF9F8AE590CF93D957C9E873E9A1BC62D9640DC8FC39C14902D49A1C80239B6C5B7FD91D05878CBF5FFC7DB2569F47C43D6C0D27C438ABFF276E87364DEB8858A37E5A62C446AF95D8B786EAF0B5FCF78D98B41496794F8DCAAC4EEF34B2ACFB94C7E8C32A9E9866A8FA0B6F2A06F00A1CCDE569F97EEC05C803BA7500ACC96691D8898D73D8E6A47B8F43C3D5DE74458D20EDA61474C426359677001FBD75A74D7D5DB6CB4FEB83122F133206203E4E2D293F838BF8C8B3A29ACB321315100B87E80E0EDB272EE80FDA944E3FB6084ED4D7F7C7D21C69D9DA43D31A90B70693F9B0CC3EAC74C11AB8FF655905688916CFA4EF0BD04135F2E50B7C689A21D04E8E981E74C6058188B9B1F9DFC3EEC6838E9FFBCF22CE738D8A177C19318DFFEF090CEE67E12DE1A3E2A39F61247547BA5257489CBC11D7D91ED34617FCC42F7A9DA2E3CF31A94A210A1018143173913C38F60E62B24BF0D7518F38B5BAB3E6A1F8AEB35E31D6442C8ABB5178EFC892D2E787D79C6AD9E2FC271792983FA9955AC4D1D84A36C024071BC6E431B625519D556AF38185601F70E29035EA6A09C8B676C9D88CF7E05E0F17098B584C4168735940263F940033A220F40BE4C85344128B14BEB9E75696DB37014107801A59B13E89CD9D2258C169D523BE6D31552C44C82FF4BB18EC9F099F3BF0E5B1BB2BA9A87D7E26F98D294927B600B5529C47E04D98956677CBCEE8FA2B60F49776D8B8C367465B7C626DA53700684FB6C918EAD0EAB8360E4F60EDD25B4F43816A75ECF70F909301825B512469F8389D79402311D8AECB7B3EF8599E79485A4388D87744D899F7C47EE644361E17040A7958C8911BE6F463AB6A9B2AFACD688EC55EF517B38F1339EFC54487232798BB25522FF4572FF68567FE830F92F7B8113EFCE3E98C3FFFBAEDCE4FD8B50E41DA97C0C08E423A72689CC68E68F752A5E3A9003E64E35C957CA2E1C48BB6F64B05F56B70B575AD2F278D57850A7AD568C24A4D32A3D74B29F03DC125488BC7C637DA582357F40B0A52D16B3B40BB2C2315D03360BC24209E20972C200566BCF3BBE5C5B0AEDD83132A8A4D5B4242BA370B6D67D9B67EB01052D132C7866B9CB502E44796D9D356E4E3CB47CC527322CD24976FE7C9257A2864151A38E568EF7A79F10D6EF27CC04CE382347A2488B1F404FDBF407FE1CA1C9D0D5649E34800E25E18951C98CAE9F43555EEF65FEE1EA8F15828807366C3B612CD5753BF9FB8FCED08855F742CDDD6F765F74254F03186683D646E6F09AC2805586C7CF11998357CAFC5DF3F285329366F475130C928B2DCEBA4AA383758E7A9D20705C4BB9DB619E2992F608A1BA65DB254BB389468741D0502E2588AEB54390AC600C19AF5C8E61383FC1BEBE0029E4474051E4EF908828DB9CCA13277EF65DB3FD47CCC2179126AAEFB627719F421E20").unwrap());
}
/// [`ChannelManager::send_payment`]: crate::ln::channelmanager::ChannelManager::send_payment
/// [`Event::PaymentPathFailed`]: crate::events::Event::PaymentPathFailed
/// [`Event::PaymentFailed`]: crate::events::Event::PaymentFailed
-#[derive(Clone, Debug)]
+#[derive(Clone, Debug, PartialEq, Eq)]
pub enum RetryableSendFailure {
/// The provided [`PaymentParameters::expiry_time`] indicated that the payment has expired. Note
/// that this error is *not* caused by [`Retry::Timeout`].
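Deriving `PartialEq`/`Eq` lets tests compare failures directly instead of matching on the variant. An illustrative assertion (the surrounding `Result` is hypothetical; `PaymentExpired` is the variant documented above):

let res: Result<(), RetryableSendFailure> = Err(RetryableSendFailure::PaymentExpired);
assert_eq!(res, Err(RetryableSendFailure::PaymentExpired));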
use crate::util::ser::Writeable;
use crate::util::string::UntrustedString;
-use bitcoin::{Block, BlockHeader, TxMerkleNode};
-use bitcoin::hashes::Hash;
use bitcoin::network::constants::Network;
use crate::prelude::*;
check_added_monitors!(nodes[1], 1);
expect_payment_claimed!(nodes[1], payment_hash, 10_000_000);
- let mut header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
- connect_block(&nodes[1], &Block { header, txdata: vec![node_txn[1].clone()]});
+ connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, vec![node_txn[1].clone()]));
check_closed_broadcast!(nodes[1], true);
check_added_monitors!(nodes[1], 1);
check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
assert_eq!(claim_txn.len(), 1);
check_spends!(claim_txn[0], node_txn[1]);
- header.prev_blockhash = nodes[0].best_block_hash();
- connect_block(&nodes[0], &Block { header, txdata: vec![node_txn[1].clone()]});
+ connect_block(&nodes[0], &create_dummy_block(nodes[0].best_block_hash(), 42, vec![node_txn[1].clone()]));
if confirm_commitment_tx {
connect_blocks(&nodes[0], BREAKDOWN_TIMEOUT as u32 - 1);
}
- header.prev_blockhash = nodes[0].best_block_hash();
- let claim_block = Block { header, txdata: if payment_timeout { timeout_txn } else { vec![claim_txn[0].clone()] } };
+ let claim_block = create_dummy_block(nodes[0].best_block_hash(), 42, if payment_timeout { timeout_txn } else { vec![claim_txn[0].clone()] });
if payment_timeout {
assert!(confirm_commitment_tx); // Otherwise we're spending below our CSV!
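The new `create_dummy_block` helper replaces the repeated manual header construction removed above. Its body is not part of this diff, but from the removed call sites it plausibly looks like:

fn create_dummy_block(prev_blockhash: BlockHash, time: u32, txdata: Vec<Transaction>) -> Block {
	let header = BlockHeader {
		version: 0x20000000, prev_blockhash, merkle_root: TxMerkleNode::all_zeros(),
		time, bits: 42, nonce: 42,
	};
	Block { header, txdata }
}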
let route = get_route(
&nodes[0].node.get_our_node_id(), &payment_params, &nodes[0].network_graph.read_only(),
Some(&nodes[0].node.list_usable_channels().iter().collect::<Vec<_>>()),
- amt_msat, nodes[0].logger, &scorer, &random_seed_bytes).unwrap();
+ amt_msat, nodes[0].logger, &scorer, &(), &random_seed_bytes).unwrap();
nodes[0].node.send_payment_with_route(&route, payment_hash,
RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
check_added_monitors!(nodes[0], 1);
let route = get_route(
&nodes[0].node.get_our_node_id(), &route_params.payment_params,
&nodes[0].network_graph.read_only(), None, route_params.final_value_msat,
- nodes[0].logger, &scorer, &random_seed_bytes,
+ nodes[0].logger, &scorer, &(), &random_seed_bytes,
).unwrap();
let (payment_hash, payment_secret) = nodes[2].node.create_inbound_payment(Some(amt_msat), 60 * 60, None).unwrap();
_ => panic!("Unexpected event")
}
} else if test == InterceptTest::Timeout {
- let mut block = Block {
- header: BlockHeader { version: 0x20000000, prev_blockhash: nodes[0].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 },
- txdata: vec![],
- };
+ let mut block = create_dummy_block(nodes[0].best_block_hash(), 42, Vec::new());
connect_block(&nodes[0], &block);
connect_block(&nodes[1], &block);
for _ in 0..TEST_FINAL_CLTV {
use crate::util::ser::{VecWriter, Writeable, Writer};
use crate::ln::peer_channel_encryptor::{PeerChannelEncryptor,NextNoiseStep};
use crate::ln::wire;
-use crate::ln::wire::Encode;
+use crate::ln::wire::{Encode, Type};
use crate::onion_message::{CustomOnionMessageContents, CustomOnionMessageHandler, SimpleArcOnionMessenger, SimpleRefOnionMessenger};
use crate::routing::gossip::{NetworkGraph, P2PGossipSync, NodeId, NodeAlias};
use crate::util::atomic_counter::AtomicCounter;
use crate::io;
use alloc::collections::LinkedList;
use crate::sync::{Arc, Mutex, MutexGuard, FairRwLock};
-use core::sync::atomic::{AtomicBool, AtomicU32, Ordering};
+use core::sync::atomic::{AtomicBool, AtomicU32, AtomicI32, Ordering};
use core::{cmp, hash, fmt, mem};
use core::ops::Deref;
use core::convert::Infallible;
/// in the process. Each message is paired with the node id of the intended recipient. If no
/// connection to the node exists, then the message is simply not sent.
fn get_and_clear_pending_msg(&self) -> Vec<(PublicKey, Self::CustomMessage)>;
+
+ /// Gets the node feature flags which this handler itself supports. All available handlers are
+ /// queried similarly and their feature flags are OR'd together to form the [`NodeFeatures`]
+ /// which are broadcasted in our [`NodeAnnouncement`] message.
+ ///
+ /// [`NodeAnnouncement`]: crate::ln::msgs::NodeAnnouncement
+ fn provided_node_features(&self) -> NodeFeatures;
+
+ /// Gets the init feature flags which should be sent to the given peer. All available handlers
+ /// are queried similarly and their feature flags are OR'd together to form the [`InitFeatures`]
+ /// which are sent in our [`Init`] message.
+ ///
+ /// [`Init`]: crate::ln::msgs::Init
+ fn provided_init_features(&self, their_node_id: &PublicKey) -> InitFeatures;
}
/// A dummy struct which implements `RoutingMessageHandler` without storing any routing information
}
fn get_and_clear_pending_msg(&self) -> Vec<(PublicKey, Self::CustomMessage)> { Vec::new() }
+
+ fn provided_node_features(&self) -> NodeFeatures { NodeFeatures::empty() }
+
+ fn provided_init_features(&self, _their_node_id: &PublicKey) -> InitFeatures {
+ InitFeatures::empty()
+ }
}
/// A dummy struct which implements `ChannelMessageHandler` without having any channels.
/// lock held. Entries may be added with only the `peers` read lock held (though the
/// `Descriptor` value must already exist in `peers`).
node_id_to_descriptor: Mutex<HashMap<PublicKey, Descriptor>>,
- /// We can only have one thread processing events at once, but we don't usually need the full
- /// `peers` write lock to do so, so instead we block on this empty mutex when entering
- /// `process_events`.
- event_processing_lock: Mutex<()>,
- /// Because event processing is global and always does all available work before returning,
- /// there is no reason for us to have many event processors waiting on the lock at once.
- /// Instead, we limit the total blocked event processors to always exactly one by setting this
- /// when an event process call is waiting.
- blocked_event_processors: AtomicBool,
+ /// We can only have one thread processing events at once, but if a second call to
+ /// `process_events` happens while a first call is in progress, one of the two calls needs to
+ /// start from the top to ensure any new messages are also handled.
+ ///
+ /// Because the event handler calls into user code which may block, we don't want to block a
+ /// second thread waiting for another thread to handle events which is then blocked on user
+ /// code, so we store an atomic counter here:
+ /// * 0 indicates no event processor is running
+ /// * 1 indicates an event processor is running
+ /// * > 1 indicates an event processor is running but needs to start again from the top once
+ /// it finishes as another thread tried to start processing events but returned early.
+ event_processing_state: AtomicI32,
/// Used to track the last value sent in a node_announcement "timestamp" field. We ensure this
/// value increases strictly since we don't assume access to a time source.
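Together with the `process_events` changes further down, the counter works roughly as follows (a condensed sketch; the real loop body drains and handles all pending message events):

pub fn process_events(&self) {
	if self.event_processing_state.fetch_add(1, Ordering::AcqRel) > 0 {
		// Not the first caller: our increment tells the running processor to
		// go around again, so we can return without blocking on user code.
		return;
	}
	loop {
		// ... drain and handle all pending message events ...
		if self.event_processing_state.fetch_add(-1, Ordering::AcqRel) != 1 {
			// Another thread called process_events while we were working;
			// reset to "one processor running" and loop to pick up its work.
			self.event_processing_state.store(1, Ordering::Release);
			continue;
		}
		break;
	}
}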
message_handler,
peers: FairRwLock::new(HashMap::new()),
node_id_to_descriptor: Mutex::new(HashMap::new()),
- event_processing_lock: Mutex::new(()),
- blocked_event_processors: AtomicBool::new(false),
+ event_processing_state: AtomicI32::new(0),
ephemeral_key_midstate,
peer_counter: AtomicCounter::new(),
gossip_processing_backlogged: AtomicBool::new(false),
SecretKey::from_slice(&Sha256::from_engine(ephemeral_hash).into_inner()).expect("You broke SHA-256!")
}
+ fn init_features(&self, their_node_id: &PublicKey) -> InitFeatures {
+ self.message_handler.chan_handler.provided_init_features(their_node_id)
+ | self.message_handler.route_handler.provided_init_features(their_node_id)
+ | self.message_handler.onion_message_handler.provided_init_features(their_node_id)
+ | self.message_handler.custom_message_handler.provided_init_features(their_node_id)
+ }
+
/// Indicates a new outbound connection has been established to a node with the given `node_id`
/// and an optional remote network address.
///
Ok(x) => x,
Err(e) => {
match e.action {
- msgs::ErrorAction::DisconnectPeer { msg: _ } => {
- //TODO: Try to push msg
+ msgs::ErrorAction::DisconnectPeer { .. } => {
+ // We may have an `ErrorMessage` to send to the peer,
+ // but writing to the socket while reading can lead to
+ // re-entrant code and possibly unexpected behavior. The
+ // message send is optimistic anyway, and in this case
+ // we immediately disconnect the peer.
+ log_debug!(self.logger, "Error handling message{}; disconnecting peer with: {}", OptionalFromDebugger(&peer_node_id), e.err);
+ return Err(PeerHandleError { });
+ },
+ msgs::ErrorAction::DisconnectPeerWithWarning { .. } => {
+ // We have a `WarningMessage` to send to the peer, but
+ // writing to the socket while reading can lead to
+ // re-entrant code and possibly unexpected behavior. The
+ // message send is optimistic anyway, and in this case
+ // we immediately disconnect the peer.
log_debug!(self.logger, "Error handling message{}; disconnecting peer with: {}", OptionalFromDebugger(&peer_node_id), e.err);
return Err(PeerHandleError { });
},
peer.set_their_node_id(their_node_id);
insert_node_id!();
- let features = self.message_handler.chan_handler.provided_init_features(&their_node_id)
- .or(self.message_handler.route_handler.provided_init_features(&their_node_id))
- .or(self.message_handler.onion_message_handler.provided_init_features(&their_node_id));
+ let features = self.init_features(&their_node_id);
let resp = msgs::Init { features, remote_network_address: filter_addresses(peer.their_net_address.clone()) };
self.enqueue_message(peer, &resp);
peer.awaiting_pong_timer_tick_intervals = 0;
peer.pending_read_is_header = true;
peer.set_their_node_id(their_node_id);
insert_node_id!();
- let features = self.message_handler.chan_handler.provided_init_features(&their_node_id)
- .or(self.message_handler.route_handler.provided_init_features(&their_node_id))
- .or(self.message_handler.onion_message_handler.provided_init_features(&their_node_id));
+ let features = self.init_features(&their_node_id);
let resp = msgs::Init { features, remote_network_address: filter_addresses(peer.their_net_address.clone()) };
self.enqueue_message(peer, &resp);
peer.awaiting_pong_timer_tick_intervals = 0;
Ok(x) => x,
Err(e) => {
match e {
- // Note that to avoid recursion we never call
+ // Note that to avoid re-entrancy we never call
// `do_attempt_write_data` from here, causing
// the messages enqueued here to not actually
// be sent before the peer is disconnected.
});
continue;
}
- (msgs::DecodeError::UnknownRequiredFeature, ty) => {
- log_gossip!(self.logger, "Received a message with an unknown required feature flag or TLV, you may want to update!");
- self.enqueue_message(peer, &msgs::WarningMessage { channel_id: [0; 32], data: format!("Received an unknown required feature/TLV in message type {:?}", ty) });
+ (msgs::DecodeError::UnknownRequiredFeature, _) => {
+ log_debug!(self.logger, "Received a message with an unknown required feature flag or TLV, you may want to update!");
return Err(PeerHandleError { });
}
(msgs::DecodeError::UnknownVersion, _) => return Err(PeerHandleError { }),
// Need an Init as first message
if let wire::Message::Init(msg) = message {
- if msg.features.requires_unknown_bits() {
- log_debug!(self.logger, "Peer features required unknown version bits");
+ let our_features = self.init_features(&their_node_id);
+ if msg.features.requires_unknown_bits_from(&our_features) {
+ log_debug!(self.logger, "Peer requires features unknown to us");
+ return Err(PeerHandleError { }.into());
+ }
+
+ if our_features.requires_unknown_bits_from(&msg.features) {
+ log_debug!(self.logger, "We require features unknown to our peer");
return Err(PeerHandleError { }.into());
}
+
if peer_lock.their_features.is_some() {
return Err(PeerHandleError { }.into());
}
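The check is now symmetric: each side's *required* feature bits must be at least known to (advertised, even just optionally, by) the other. An illustrative use of `requires_unknown_bits_from` (the specific feature is only an example of a required bit the counterparty doesn't advertise):

let ours = InitFeatures::empty();
let mut theirs = InitFeatures::empty();
theirs.set_data_loss_protect_required();
// `theirs` requires a bit `ours` doesn't advertise at all, so we disconnect.
assert!(theirs.requires_unknown_bits_from(&ours));
assert!(!ours.requires_unknown_bits_from(&theirs));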
/// [`ChannelManager::process_pending_htlc_forwards`]: crate::ln::channelmanager::ChannelManager::process_pending_htlc_forwards
/// [`send_data`]: SocketDescriptor::send_data
pub fn process_events(&self) {
- let mut _single_processor_lock = self.event_processing_lock.try_lock();
- if _single_processor_lock.is_err() {
- // While we could wake the older sleeper here with a CV and make more even waiting
- // times, that would be a lot of overengineering for a simple "reduce total waiter
- // count" goal.
- match self.blocked_event_processors.compare_exchange(false, true, Ordering::AcqRel, Ordering::Acquire) {
- Err(val) => {
- debug_assert!(val, "compare_exchange failed spuriously?");
- return;
- },
- Ok(val) => {
- debug_assert!(!val, "compare_exchange succeeded spuriously?");
- // We're the only waiter, as the running process_events may have emptied the
- // pending events "long" ago and there are new events for us to process, wait until
- // its done and process any leftover events before returning.
- _single_processor_lock = Ok(self.event_processing_lock.lock().unwrap());
- self.blocked_event_processors.store(false, Ordering::Release);
- }
- }
+ if self.event_processing_state.fetch_add(1, Ordering::AcqRel) > 0 {
+ // If we're not the first event processor to get here, just return early; the increment
+ // we just did will be treated as "go around again" at the end.
+ return;
}
- self.update_gossip_backlogged();
- let flush_read_disabled = self.gossip_processing_backlog_lifted.swap(false, Ordering::Relaxed);
-
- let mut peers_to_disconnect = HashMap::new();
- let mut events_generated = self.message_handler.chan_handler.get_and_clear_pending_msg_events();
- events_generated.append(&mut self.message_handler.route_handler.get_and_clear_pending_msg_events());
+ loop {
+ self.update_gossip_backlogged();
+ let flush_read_disabled = self.gossip_processing_backlog_lifted.swap(false, Ordering::Relaxed);
- {
- // TODO: There are some DoS attacks here where you can flood someone's outbound send
- // buffer by doing things like announcing channels on another node. We should be willing to
- // drop optional-ish messages when send buffers get full!
+ let mut peers_to_disconnect = HashMap::new();
+ let mut events_generated = self.message_handler.chan_handler.get_and_clear_pending_msg_events();
+ events_generated.append(&mut self.message_handler.route_handler.get_and_clear_pending_msg_events());
- let peers_lock = self.peers.read().unwrap();
- let peers = &*peers_lock;
- macro_rules! get_peer_for_forwarding {
- ($node_id: expr) => {
- {
- if peers_to_disconnect.get($node_id).is_some() {
- // If we've "disconnected" this peer, do not send to it.
- continue;
- }
- let descriptor_opt = self.node_id_to_descriptor.lock().unwrap().get($node_id).cloned();
- match descriptor_opt {
- Some(descriptor) => match peers.get(&descriptor) {
- Some(peer_mutex) => {
- let peer_lock = peer_mutex.lock().unwrap();
- if !peer_lock.handshake_complete() {
+ {
+ // TODO: There are some DoS attacks here where you can flood someone's outbound send
+ // buffer by doing things like announcing channels on another node. We should be willing to
+ // drop optional-ish messages when send buffers get full!
+
+ let peers_lock = self.peers.read().unwrap();
+ let peers = &*peers_lock;
+ macro_rules! get_peer_for_forwarding {
+ ($node_id: expr) => {
+ {
+ if peers_to_disconnect.get($node_id).is_some() {
+ // If we've "disconnected" this peer, do not send to it.
+ continue;
+ }
+ let descriptor_opt = self.node_id_to_descriptor.lock().unwrap().get($node_id).cloned();
+ match descriptor_opt {
+ Some(descriptor) => match peers.get(&descriptor) {
+ Some(peer_mutex) => {
+ let peer_lock = peer_mutex.lock().unwrap();
+ if !peer_lock.handshake_complete() {
+ continue;
+ }
+ peer_lock
+ },
+ None => {
+ debug_assert!(false, "Inconsistent peers set state!");
continue;
}
- peer_lock
},
None => {
- debug_assert!(false, "Inconsistent peers set state!");
continue;
- }
- },
- None => {
- continue;
- },
+ },
+ }
}
}
}
- }
- for event in events_generated.drain(..) {
- match event {
- MessageSendEvent::SendAcceptChannel { ref node_id, ref msg } => {
- log_debug!(self.logger, "Handling SendAcceptChannel event in peer_handler for node {} for channel {}",
- log_pubkey!(node_id),
- log_bytes!(msg.temporary_channel_id));
- self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
- },
- MessageSendEvent::SendAcceptChannelV2 { ref node_id, ref msg } => {
- log_debug!(self.logger, "Handling SendAcceptChannelV2 event in peer_handler for node {} for channel {}",
- log_pubkey!(node_id),
- log_bytes!(msg.temporary_channel_id));
- self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
- },
- MessageSendEvent::SendOpenChannel { ref node_id, ref msg } => {
- log_debug!(self.logger, "Handling SendOpenChannel event in peer_handler for node {} for channel {}",
- log_pubkey!(node_id),
- log_bytes!(msg.temporary_channel_id));
- self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
- },
- MessageSendEvent::SendOpenChannelV2 { ref node_id, ref msg } => {
- log_debug!(self.logger, "Handling SendOpenChannelV2 event in peer_handler for node {} for channel {}",
- log_pubkey!(node_id),
- log_bytes!(msg.temporary_channel_id));
- self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
- },
- MessageSendEvent::SendFundingCreated { ref node_id, ref msg } => {
- log_debug!(self.logger, "Handling SendFundingCreated event in peer_handler for node {} for channel {} (which becomes {})",
- log_pubkey!(node_id),
- log_bytes!(msg.temporary_channel_id),
- log_funding_channel_id!(msg.funding_txid, msg.funding_output_index));
- // TODO: If the peer is gone we should generate a DiscardFunding event
- // indicating to the wallet that they should just throw away this funding transaction
- self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
- },
- MessageSendEvent::SendFundingSigned { ref node_id, ref msg } => {
- log_debug!(self.logger, "Handling SendFundingSigned event in peer_handler for node {} for channel {}",
- log_pubkey!(node_id),
- log_bytes!(msg.channel_id));
- self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
- },
- MessageSendEvent::SendChannelReady { ref node_id, ref msg } => {
- log_debug!(self.logger, "Handling SendChannelReady event in peer_handler for node {} for channel {}",
- log_pubkey!(node_id),
- log_bytes!(msg.channel_id));
- self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
- },
- MessageSendEvent::SendTxAddInput { ref node_id, ref msg } => {
- log_debug!(self.logger, "Handling SendTxAddInput event in peer_handler for node {} for channel {}",
- log_pubkey!(node_id),
- log_bytes!(msg.channel_id));
- self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
- },
- MessageSendEvent::SendTxAddOutput { ref node_id, ref msg } => {
- log_debug!(self.logger, "Handling SendTxAddOutput event in peer_handler for node {} for channel {}",
- log_pubkey!(node_id),
- log_bytes!(msg.channel_id));
- self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
- },
- MessageSendEvent::SendTxRemoveInput { ref node_id, ref msg } => {
- log_debug!(self.logger, "Handling SendTxRemoveInput event in peer_handler for node {} for channel {}",
- log_pubkey!(node_id),
- log_bytes!(msg.channel_id));
- self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
- },
- MessageSendEvent::SendTxRemoveOutput { ref node_id, ref msg } => {
- log_debug!(self.logger, "Handling SendTxRemoveOutput event in peer_handler for node {} for channel {}",
- log_pubkey!(node_id),
- log_bytes!(msg.channel_id));
- self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
- },
- MessageSendEvent::SendTxComplete { ref node_id, ref msg } => {
- log_debug!(self.logger, "Handling SendTxComplete event in peer_handler for node {} for channel {}",
- log_pubkey!(node_id),
- log_bytes!(msg.channel_id));
- self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
- },
- MessageSendEvent::SendTxSignatures { ref node_id, ref msg } => {
- log_debug!(self.logger, "Handling SendTxSignatures event in peer_handler for node {} for channel {}",
- log_pubkey!(node_id),
- log_bytes!(msg.channel_id));
- self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
- },
- MessageSendEvent::SendTxInitRbf { ref node_id, ref msg } => {
- log_debug!(self.logger, "Handling SendTxInitRbf event in peer_handler for node {} for channel {}",
- log_pubkey!(node_id),
- log_bytes!(msg.channel_id));
- self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
- },
- MessageSendEvent::SendTxAckRbf { ref node_id, ref msg } => {
- log_debug!(self.logger, "Handling SendTxAckRbf event in peer_handler for node {} for channel {}",
- log_pubkey!(node_id),
- log_bytes!(msg.channel_id));
- self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
- },
- MessageSendEvent::SendTxAbort { ref node_id, ref msg } => {
- log_debug!(self.logger, "Handling SendTxAbort event in peer_handler for node {} for channel {}",
- log_pubkey!(node_id),
- log_bytes!(msg.channel_id));
- self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
- },
- MessageSendEvent::SendAnnouncementSignatures { ref node_id, ref msg } => {
- log_debug!(self.logger, "Handling SendAnnouncementSignatures event in peer_handler for node {} for channel {})",
- log_pubkey!(node_id),
- log_bytes!(msg.channel_id));
- self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
- },
- MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
- log_debug!(self.logger, "Handling UpdateHTLCs event in peer_handler for node {} with {} adds, {} fulfills, {} fails for channel {}",
- log_pubkey!(node_id),
- update_add_htlcs.len(),
- update_fulfill_htlcs.len(),
- update_fail_htlcs.len(),
- log_bytes!(commitment_signed.channel_id));
- let mut peer = get_peer_for_forwarding!(node_id);
- for msg in update_add_htlcs {
- self.enqueue_message(&mut *peer, msg);
- }
- for msg in update_fulfill_htlcs {
- self.enqueue_message(&mut *peer, msg);
- }
- for msg in update_fail_htlcs {
- self.enqueue_message(&mut *peer, msg);
- }
- for msg in update_fail_malformed_htlcs {
- self.enqueue_message(&mut *peer, msg);
- }
- if let &Some(ref msg) = update_fee {
- self.enqueue_message(&mut *peer, msg);
- }
- self.enqueue_message(&mut *peer, commitment_signed);
- },
- MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
- log_debug!(self.logger, "Handling SendRevokeAndACK event in peer_handler for node {} for channel {}",
- log_pubkey!(node_id),
- log_bytes!(msg.channel_id));
- self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
- },
- MessageSendEvent::SendClosingSigned { ref node_id, ref msg } => {
- log_debug!(self.logger, "Handling SendClosingSigned event in peer_handler for node {} for channel {}",
- log_pubkey!(node_id),
- log_bytes!(msg.channel_id));
- self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
- },
- MessageSendEvent::SendShutdown { ref node_id, ref msg } => {
- log_debug!(self.logger, "Handling Shutdown event in peer_handler for node {} for channel {}",
- log_pubkey!(node_id),
- log_bytes!(msg.channel_id));
- self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
- },
- MessageSendEvent::SendChannelReestablish { ref node_id, ref msg } => {
- log_debug!(self.logger, "Handling SendChannelReestablish event in peer_handler for node {} for channel {}",
- log_pubkey!(node_id),
- log_bytes!(msg.channel_id));
- self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
- },
- MessageSendEvent::SendChannelAnnouncement { ref node_id, ref msg, ref update_msg } => {
- log_debug!(self.logger, "Handling SendChannelAnnouncement event in peer_handler for node {} for short channel id {}",
- log_pubkey!(node_id),
- msg.contents.short_channel_id);
- self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
- self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), update_msg);
- },
- MessageSendEvent::BroadcastChannelAnnouncement { msg, update_msg } => {
- log_debug!(self.logger, "Handling BroadcastChannelAnnouncement event in peer_handler for short channel id {}", msg.contents.short_channel_id);
- match self.message_handler.route_handler.handle_channel_announcement(&msg) {
- Ok(_) | Err(LightningError { action: msgs::ErrorAction::IgnoreDuplicateGossip, .. }) =>
- self.forward_broadcast_msg(peers, &wire::Message::ChannelAnnouncement(msg), None),
- _ => {},
- }
- if let Some(msg) = update_msg {
+ for event in events_generated.drain(..) {
+ match event {
+ MessageSendEvent::SendAcceptChannel { ref node_id, ref msg } => {
+ log_debug!(self.logger, "Handling SendAcceptChannel event in peer_handler for node {} for channel {}",
+ log_pubkey!(node_id),
+ log_bytes!(msg.temporary_channel_id));
+ self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
+ },
+ MessageSendEvent::SendAcceptChannelV2 { ref node_id, ref msg } => {
+ log_debug!(self.logger, "Handling SendAcceptChannelV2 event in peer_handler for node {} for channel {}",
+ log_pubkey!(node_id),
+ log_bytes!(msg.temporary_channel_id));
+ self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
+ },
+ MessageSendEvent::SendOpenChannel { ref node_id, ref msg } => {
+ log_debug!(self.logger, "Handling SendOpenChannel event in peer_handler for node {} for channel {}",
+ log_pubkey!(node_id),
+ log_bytes!(msg.temporary_channel_id));
+ self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
+ },
+ MessageSendEvent::SendOpenChannelV2 { ref node_id, ref msg } => {
+ log_debug!(self.logger, "Handling SendOpenChannelV2 event in peer_handler for node {} for channel {}",
+ log_pubkey!(node_id),
+ log_bytes!(msg.temporary_channel_id));
+ self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
+ },
+ MessageSendEvent::SendFundingCreated { ref node_id, ref msg } => {
+ log_debug!(self.logger, "Handling SendFundingCreated event in peer_handler for node {} for channel {} (which becomes {})",
+ log_pubkey!(node_id),
+ log_bytes!(msg.temporary_channel_id),
+ log_funding_channel_id!(msg.funding_txid, msg.funding_output_index));
+ // TODO: If the peer is gone we should generate a DiscardFunding event
+ // indicating to the wallet that they should just throw away this funding transaction
+ self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
+ },
+ MessageSendEvent::SendFundingSigned { ref node_id, ref msg } => {
+ log_debug!(self.logger, "Handling SendFundingSigned event in peer_handler for node {} for channel {}",
+ log_pubkey!(node_id),
+ log_bytes!(msg.channel_id));
+ self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
+ },
+ MessageSendEvent::SendChannelReady { ref node_id, ref msg } => {
+ log_debug!(self.logger, "Handling SendChannelReady event in peer_handler for node {} for channel {}",
+ log_pubkey!(node_id),
+ log_bytes!(msg.channel_id));
+ self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
+ },
+ MessageSendEvent::SendTxAddInput { ref node_id, ref msg } => {
+ log_debug!(self.logger, "Handling SendTxAddInput event in peer_handler for node {} for channel {}",
+ log_pubkey!(node_id),
+ log_bytes!(msg.channel_id));
+ self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
+ },
+ MessageSendEvent::SendTxAddOutput { ref node_id, ref msg } => {
+ log_debug!(self.logger, "Handling SendTxAddOutput event in peer_handler for node {} for channel {}",
+ log_pubkey!(node_id),
+ log_bytes!(msg.channel_id));
+ self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
+ },
+ MessageSendEvent::SendTxRemoveInput { ref node_id, ref msg } => {
+ log_debug!(self.logger, "Handling SendTxRemoveInput event in peer_handler for node {} for channel {}",
+ log_pubkey!(node_id),
+ log_bytes!(msg.channel_id));
+ self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
+ },
+ MessageSendEvent::SendTxRemoveOutput { ref node_id, ref msg } => {
+ log_debug!(self.logger, "Handling SendTxRemoveOutput event in peer_handler for node {} for channel {}",
+ log_pubkey!(node_id),
+ log_bytes!(msg.channel_id));
+ self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
+ },
+ MessageSendEvent::SendTxComplete { ref node_id, ref msg } => {
+ log_debug!(self.logger, "Handling SendTxComplete event in peer_handler for node {} for channel {}",
+ log_pubkey!(node_id),
+ log_bytes!(msg.channel_id));
+ self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
+ },
+ MessageSendEvent::SendTxSignatures { ref node_id, ref msg } => {
+ log_debug!(self.logger, "Handling SendTxSignatures event in peer_handler for node {} for channel {}",
+ log_pubkey!(node_id),
+ log_bytes!(msg.channel_id));
+ self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
+ },
+ MessageSendEvent::SendTxInitRbf { ref node_id, ref msg } => {
+ log_debug!(self.logger, "Handling SendTxInitRbf event in peer_handler for node {} for channel {}",
+ log_pubkey!(node_id),
+ log_bytes!(msg.channel_id));
+ self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
+ },
+ MessageSendEvent::SendTxAckRbf { ref node_id, ref msg } => {
+ log_debug!(self.logger, "Handling SendTxAckRbf event in peer_handler for node {} for channel {}",
+ log_pubkey!(node_id),
+ log_bytes!(msg.channel_id));
+ self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
+ },
+ MessageSendEvent::SendTxAbort { ref node_id, ref msg } => {
+ log_debug!(self.logger, "Handling SendTxAbort event in peer_handler for node {} for channel {}",
+ log_pubkey!(node_id),
+ log_bytes!(msg.channel_id));
+ self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
+ },
+ MessageSendEvent::SendAnnouncementSignatures { ref node_id, ref msg } => {
+ log_debug!(self.logger, "Handling SendAnnouncementSignatures event in peer_handler for node {} for channel {})",
+ log_pubkey!(node_id),
+ log_bytes!(msg.channel_id));
+ self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
+ },
+ MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
+ log_debug!(self.logger, "Handling UpdateHTLCs event in peer_handler for node {} with {} adds, {} fulfills, {} fails for channel {}",
+ log_pubkey!(node_id),
+ update_add_htlcs.len(),
+ update_fulfill_htlcs.len(),
+ update_fail_htlcs.len(),
+ log_bytes!(commitment_signed.channel_id));
+ let mut peer = get_peer_for_forwarding!(node_id);
+ for msg in update_add_htlcs {
+ self.enqueue_message(&mut *peer, msg);
+ }
+ for msg in update_fulfill_htlcs {
+ self.enqueue_message(&mut *peer, msg);
+ }
+ for msg in update_fail_htlcs {
+ self.enqueue_message(&mut *peer, msg);
+ }
+ for msg in update_fail_malformed_htlcs {
+ self.enqueue_message(&mut *peer, msg);
+ }
+ if let &Some(ref msg) = update_fee {
+ self.enqueue_message(&mut *peer, msg);
+ }
+ self.enqueue_message(&mut *peer, commitment_signed);
+ },
+ MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
+ log_debug!(self.logger, "Handling SendRevokeAndACK event in peer_handler for node {} for channel {}",
+ log_pubkey!(node_id),
+ log_bytes!(msg.channel_id));
+ self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
+ },
+ MessageSendEvent::SendClosingSigned { ref node_id, ref msg } => {
+ log_debug!(self.logger, "Handling SendClosingSigned event in peer_handler for node {} for channel {}",
+ log_pubkey!(node_id),
+ log_bytes!(msg.channel_id));
+ self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
+ },
+ MessageSendEvent::SendShutdown { ref node_id, ref msg } => {
+ log_debug!(self.logger, "Handling Shutdown event in peer_handler for node {} for channel {}",
+ log_pubkey!(node_id),
+ log_bytes!(msg.channel_id));
+ self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
+ },
+ MessageSendEvent::SendChannelReestablish { ref node_id, ref msg } => {
+ log_debug!(self.logger, "Handling SendChannelReestablish event in peer_handler for node {} for channel {}",
+ log_pubkey!(node_id),
+ log_bytes!(msg.channel_id));
+ self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
+ },
+ MessageSendEvent::SendChannelAnnouncement { ref node_id, ref msg, ref update_msg } => {
+ log_debug!(self.logger, "Handling SendChannelAnnouncement event in peer_handler for node {} for short channel id {}",
+ log_pubkey!(node_id),
+ msg.contents.short_channel_id);
+ self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
+ self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), update_msg);
+ },
+ MessageSendEvent::BroadcastChannelAnnouncement { msg, update_msg } => {
+ log_debug!(self.logger, "Handling BroadcastChannelAnnouncement event in peer_handler for short channel id {}", msg.contents.short_channel_id);
+ match self.message_handler.route_handler.handle_channel_announcement(&msg) {
+ Ok(_) | Err(LightningError { action: msgs::ErrorAction::IgnoreDuplicateGossip, .. }) =>
+ self.forward_broadcast_msg(peers, &wire::Message::ChannelAnnouncement(msg), None),
+ _ => {},
+ }
+ if let Some(msg) = update_msg {
+ match self.message_handler.route_handler.handle_channel_update(&msg) {
+ Ok(_) | Err(LightningError { action: msgs::ErrorAction::IgnoreDuplicateGossip, .. }) =>
+ self.forward_broadcast_msg(peers, &wire::Message::ChannelUpdate(msg), None),
+ _ => {},
+ }
+ }
+ },
+ MessageSendEvent::BroadcastChannelUpdate { msg } => {
+ log_debug!(self.logger, "Handling BroadcastChannelUpdate event in peer_handler for short channel id {}", msg.contents.short_channel_id);
match self.message_handler.route_handler.handle_channel_update(&msg) {
Ok(_) | Err(LightningError { action: msgs::ErrorAction::IgnoreDuplicateGossip, .. }) =>
self.forward_broadcast_msg(peers, &wire::Message::ChannelUpdate(msg), None),
_ => {},
}
+ },
+ MessageSendEvent::BroadcastNodeAnnouncement { msg } => {
+ log_debug!(self.logger, "Handling BroadcastNodeAnnouncement event in peer_handler for node {}", msg.contents.node_id);
+ match self.message_handler.route_handler.handle_node_announcement(&msg) {
+ Ok(_) | Err(LightningError { action: msgs::ErrorAction::IgnoreDuplicateGossip, .. }) =>
+ self.forward_broadcast_msg(peers, &wire::Message::NodeAnnouncement(msg), None),
+ _ => {},
+ }
+ },
+ MessageSendEvent::SendChannelUpdate { ref node_id, ref msg } => {
+ log_trace!(self.logger, "Handling SendChannelUpdate event in peer_handler for node {} for channel {}",
+ log_pubkey!(node_id), msg.contents.short_channel_id);
+ self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
+ },
+ MessageSendEvent::HandleError { node_id, action } => {
+ match action {
+ msgs::ErrorAction::DisconnectPeer { msg } => {
+ if let Some(msg) = msg.as_ref() {
+ log_trace!(self.logger, "Handling DisconnectPeer HandleError event in peer_handler for node {} with message {}",
+ log_pubkey!(node_id), msg.data);
+ } else {
+ log_trace!(self.logger, "Handling DisconnectPeer HandleError event in peer_handler for node {}",
+ log_pubkey!(node_id));
+ }
+ // We do not have the peers write lock, so we just store that we're
+ // about to disconnect the peer and do it after we finish
+ // processing most messages.
+ let msg = msg.map(|msg| wire::Message::<<<CMH as core::ops::Deref>::Target as wire::CustomMessageReader>::CustomMessage>::Error(msg));
+ peers_to_disconnect.insert(node_id, msg);
+ },
+ msgs::ErrorAction::DisconnectPeerWithWarning { msg } => {
+ log_trace!(self.logger, "Handling DisconnectPeer HandleError event in peer_handler for node {} with message {}",
+ log_pubkey!(node_id), msg.data);
+ // We do not have the peers write lock, so we just store that we're
+ // about to disconnect the peer and do it after we finish
+ // processing most messages.
+ peers_to_disconnect.insert(node_id, Some(wire::Message::Warning(msg)));
+ },
+ msgs::ErrorAction::IgnoreAndLog(level) => {
+ log_given_level!(self.logger, level, "Received a HandleError event to be ignored for node {}", log_pubkey!(node_id));
+ },
+ msgs::ErrorAction::IgnoreDuplicateGossip => {},
+ msgs::ErrorAction::IgnoreError => {
+ log_debug!(self.logger, "Received a HandleError event to be ignored for node {}", log_pubkey!(node_id));
+ },
+ msgs::ErrorAction::SendErrorMessage { ref msg } => {
+ log_trace!(self.logger, "Handling SendErrorMessage HandleError event in peer_handler for node {} with message {}",
+ log_pubkey!(node_id),
+ msg.data);
+ self.enqueue_message(&mut *get_peer_for_forwarding!(&node_id), msg);
+ },
+ msgs::ErrorAction::SendWarningMessage { ref msg, ref log_level } => {
+ log_given_level!(self.logger, *log_level, "Handling SendWarningMessage HandleError event in peer_handler for node {} with message {}",
+ log_pubkey!(node_id),
+ msg.data);
+ self.enqueue_message(&mut *get_peer_for_forwarding!(&node_id), msg);
+ },
+ }
+ },
+ MessageSendEvent::SendChannelRangeQuery { ref node_id, ref msg } => {
+ self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
+ },
+ MessageSendEvent::SendShortIdsQuery { ref node_id, ref msg } => {
+ self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
}
- },
- MessageSendEvent::BroadcastChannelUpdate { msg } => {
- log_debug!(self.logger, "Handling BroadcastChannelUpdate event in peer_handler for short channel id {}", msg.contents.short_channel_id);
- match self.message_handler.route_handler.handle_channel_update(&msg) {
- Ok(_) | Err(LightningError { action: msgs::ErrorAction::IgnoreDuplicateGossip, .. }) =>
- self.forward_broadcast_msg(peers, &wire::Message::ChannelUpdate(msg), None),
- _ => {},
- }
- },
- MessageSendEvent::BroadcastNodeAnnouncement { msg } => {
- log_debug!(self.logger, "Handling BroadcastNodeAnnouncement event in peer_handler for node {}", msg.contents.node_id);
- match self.message_handler.route_handler.handle_node_announcement(&msg) {
- Ok(_) | Err(LightningError { action: msgs::ErrorAction::IgnoreDuplicateGossip, .. }) =>
- self.forward_broadcast_msg(peers, &wire::Message::NodeAnnouncement(msg), None),
- _ => {},
+ MessageSendEvent::SendReplyChannelRange { ref node_id, ref msg } => {
+ log_gossip!(self.logger, "Handling SendReplyChannelRange event in peer_handler for node {} with num_scids={} first_blocknum={} number_of_blocks={}, sync_complete={}",
+ log_pubkey!(node_id),
+ msg.short_channel_ids.len(),
+ msg.first_blocknum,
+ msg.number_of_blocks,
+ msg.sync_complete);
+ self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
}
- },
- MessageSendEvent::SendChannelUpdate { ref node_id, ref msg } => {
- log_trace!(self.logger, "Handling SendChannelUpdate event in peer_handler for node {} for channel {}",
- log_pubkey!(node_id), msg.contents.short_channel_id);
- self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
- },
- MessageSendEvent::HandleError { ref node_id, ref action } => {
- match *action {
- msgs::ErrorAction::DisconnectPeer { ref msg } => {
- // We do not have the peers write lock, so we just store that we're
- // about to disconenct the peer and do it after we finish
- // processing most messages.
- peers_to_disconnect.insert(*node_id, msg.clone());
- },
- msgs::ErrorAction::IgnoreAndLog(level) => {
- log_given_level!(self.logger, level, "Received a HandleError event to be ignored for node {}", log_pubkey!(node_id));
- },
- msgs::ErrorAction::IgnoreDuplicateGossip => {},
- msgs::ErrorAction::IgnoreError => {
- log_debug!(self.logger, "Received a HandleError event to be ignored for node {}", log_pubkey!(node_id));
- },
- msgs::ErrorAction::SendErrorMessage { ref msg } => {
- log_trace!(self.logger, "Handling SendErrorMessage HandleError event in peer_handler for node {} with message {}",
- log_pubkey!(node_id),
- msg.data);
- self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
- },
- msgs::ErrorAction::SendWarningMessage { ref msg, ref log_level } => {
- log_given_level!(self.logger, *log_level, "Handling SendWarningMessage HandleError event in peer_handler for node {} with message {}",
- log_pubkey!(node_id),
- msg.data);
- self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
- },
+ MessageSendEvent::SendGossipTimestampFilter { ref node_id, ref msg } => {
+ self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
}
- },
- MessageSendEvent::SendChannelRangeQuery { ref node_id, ref msg } => {
- self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
- },
- MessageSendEvent::SendShortIdsQuery { ref node_id, ref msg } => {
- self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
- }
- MessageSendEvent::SendReplyChannelRange { ref node_id, ref msg } => {
- log_gossip!(self.logger, "Handling SendReplyChannelRange event in peer_handler for node {} with num_scids={} first_blocknum={} number_of_blocks={}, sync_complete={}",
- log_pubkey!(node_id),
- msg.short_channel_ids.len(),
- msg.first_blocknum,
- msg.number_of_blocks,
- msg.sync_complete);
- self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
- }
- MessageSendEvent::SendGossipTimestampFilter { ref node_id, ref msg } => {
- self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
}
}
- }
- for (node_id, msg) in self.message_handler.custom_message_handler.get_and_clear_pending_msg() {
- if peers_to_disconnect.get(&node_id).is_some() { continue; }
- self.enqueue_message(&mut *get_peer_for_forwarding!(&node_id), &msg);
- }
+ for (node_id, msg) in self.message_handler.custom_message_handler.get_and_clear_pending_msg() {
+ if peers_to_disconnect.get(&node_id).is_some() { continue; }
+ self.enqueue_message(&mut *get_peer_for_forwarding!(&node_id), &msg);
+ }
- for (descriptor, peer_mutex) in peers.iter() {
- let mut peer = peer_mutex.lock().unwrap();
- if flush_read_disabled { peer.received_channel_announce_since_backlogged = false; }
- self.do_attempt_write_data(&mut (*descriptor).clone(), &mut *peer, flush_read_disabled);
+ for (descriptor, peer_mutex) in peers.iter() {
+ let mut peer = peer_mutex.lock().unwrap();
+ if flush_read_disabled { peer.received_channel_announce_since_backlogged = false; }
+ self.do_attempt_write_data(&mut (*descriptor).clone(), &mut *peer, flush_read_disabled);
+ }
}
- }
- if !peers_to_disconnect.is_empty() {
- let mut peers_lock = self.peers.write().unwrap();
- let peers = &mut *peers_lock;
- for (node_id, msg) in peers_to_disconnect.drain() {
- // Note that since we are holding the peers *write* lock we can
- // remove from node_id_to_descriptor immediately (as no other
- // thread can be holding the peer lock if we have the global write
- // lock).
-
- let descriptor_opt = self.node_id_to_descriptor.lock().unwrap().remove(&node_id);
- if let Some(mut descriptor) = descriptor_opt {
- if let Some(peer_mutex) = peers.remove(&descriptor) {
- let mut peer = peer_mutex.lock().unwrap();
- if let Some(msg) = msg {
- log_trace!(self.logger, "Handling DisconnectPeer HandleError event in peer_handler for node {} with message {}",
- log_pubkey!(node_id),
- msg.data);
- self.enqueue_message(&mut *peer, &msg);
- // This isn't guaranteed to work, but if there is enough free
- // room in the send buffer, put the error message there...
- self.do_attempt_write_data(&mut descriptor, &mut *peer, false);
- }
- self.do_disconnect(descriptor, &*peer, "DisconnectPeer HandleError");
- } else { debug_assert!(false, "Missing connection for peer"); }
+ if !peers_to_disconnect.is_empty() {
+ let mut peers_lock = self.peers.write().unwrap();
+ let peers = &mut *peers_lock;
+ for (node_id, msg) in peers_to_disconnect.drain() {
+ // Note that since we are holding the peers *write* lock we can
+ // remove from node_id_to_descriptor immediately (as no other
+ // thread can be holding the peer lock if we have the global write
+ // lock).
+
+ let descriptor_opt = self.node_id_to_descriptor.lock().unwrap().remove(&node_id);
+ if let Some(mut descriptor) = descriptor_opt {
+ if let Some(peer_mutex) = peers.remove(&descriptor) {
+ let mut peer = peer_mutex.lock().unwrap();
+ if let Some(msg) = msg {
+ self.enqueue_message(&mut *peer, &msg);
+ // This isn't guaranteed to work, but if there is enough free
+ // room in the send buffer, put the error message there...
+ self.do_attempt_write_data(&mut descriptor, &mut *peer, false);
+ }
+ self.do_disconnect(descriptor, &*peer, "DisconnectPeer HandleError");
+ } else { debug_assert!(false, "Missing connection for peer"); }
+ }
}
}
+
+ if self.event_processing_state.fetch_sub(1, Ordering::AcqRel) != 1 {
+ // If another thread incremented the state while we were running we should go
+ // around again, but only once.
+ self.event_processing_state.store(1, Ordering::Release);
+ continue;
+ }
+ break;
}
}
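The counter dance above (increment on entry, `fetch_sub` on exit, re-running once if the counter moved) is what keeps `process_events` effectively single-threaded without ever blocking callers. A minimal standalone sketch of the same pattern, with illustrative names rather than LDK's actual fields:

use std::sync::atomic::{AtomicI32, Ordering};

struct Processor {
	// 0 = idle; > 0 = a thread is processing (plus any pending re-run requests).
	event_processing_state: AtomicI32,
}

impl Processor {
	fn process_events(&self) {
		// If another thread is already processing, our increment asks it to
		// go around again on our behalf; return without blocking.
		if self.event_processing_state.fetch_add(1, Ordering::AcqRel) > 0 {
			return;
		}
		loop {
			// ... one full pass of event processing goes here ...

			// If someone called while we were working, run exactly one more
			// pass; otherwise we've returned the state to idle and can exit.
			if self.event_processing_state.fetch_sub(1, Ordering::AcqRel) != 1 {
				self.event_processing_state.store(1, Ordering::Release);
				continue;
			}
			break;
		}
	}
}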
addresses.sort_by_key(|addr| addr.get_id());
let features = self.message_handler.chan_handler.provided_node_features()
- .or(self.message_handler.route_handler.provided_node_features())
- .or(self.message_handler.onion_message_handler.provided_node_features());
+ | self.message_handler.route_handler.provided_node_features()
+ | self.message_handler.onion_message_handler.provided_node_features()
+ | self.message_handler.custom_message_handler.provided_node_features();
let announcement = msgs::UnsignedNodeAnnouncement {
features,
timestamp: self.last_node_announcement_serial.fetch_add(1, Ordering::AcqRel),
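Switching from chained `.or(..)` calls to the `|` operator makes the intent explicit: the announced feature set is the bitwise union of every handler's features, now including the custom message handler's. As a rough illustration of what such a union does to little-endian feature bytes (illustrative only, not the actual `Features` implementation):

// Union two little-endian feature-flag byte vectors, zero-padding the shorter.
fn union_feature_bytes(a: &[u8], b: &[u8]) -> Vec<u8> {
	let mut out = vec![0u8; a.len().max(b.len())];
	for (i, byte) in out.iter_mut().enumerate() {
		*byte = a.get(i).copied().unwrap_or(0) | b.get(i).copied().unwrap_or(0);
	}
	out
}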
mod tests {
use crate::sign::{NodeSigner, Recipient};
use crate::events;
+ use crate::io;
+ use crate::ln::features::{InitFeatures, NodeFeatures};
use crate::ln::peer_channel_encryptor::PeerChannelEncryptor;
- use crate::ln::peer_handler::{PeerManager, MessageHandler, SocketDescriptor, IgnoringMessageHandler, filter_addresses};
+ use crate::ln::peer_handler::{CustomMessageHandler, PeerManager, MessageHandler, SocketDescriptor, IgnoringMessageHandler, filter_addresses};
use crate::ln::{msgs, wire};
- use crate::ln::msgs::NetAddress;
+ use crate::ln::msgs::{LightningError, NetAddress};
use crate::util::test_utils;
- use bitcoin::secp256k1::SecretKey;
+ use bitcoin::secp256k1::{PublicKey, SecretKey};
use crate::prelude::*;
use crate::sync::{Arc, Mutex};
+ use core::convert::Infallible;
use core::sync::atomic::{AtomicBool, Ordering};
#[derive(Clone)]
struct PeerManagerCfg {
chan_handler: test_utils::TestChannelMessageHandler,
routing_handler: test_utils::TestRoutingMessageHandler,
+ custom_handler: TestCustomMessageHandler,
logger: test_utils::TestLogger,
node_signer: test_utils::TestNodeSigner,
}
+ struct TestCustomMessageHandler {
+ features: InitFeatures,
+ }
+
+ impl wire::CustomMessageReader for TestCustomMessageHandler {
+ type CustomMessage = Infallible;
+ fn read<R: io::Read>(&self, _: u16, _: &mut R) -> Result<Option<Self::CustomMessage>, msgs::DecodeError> {
+ Ok(None)
+ }
+ }
+
+ impl CustomMessageHandler for TestCustomMessageHandler {
+ fn handle_custom_message(&self, _: Infallible, _: &PublicKey) -> Result<(), LightningError> {
+ unreachable!();
+ }
+
+ fn get_and_clear_pending_msg(&self) -> Vec<(PublicKey, Self::CustomMessage)> { Vec::new() }
+
+ fn provided_node_features(&self) -> NodeFeatures { NodeFeatures::empty() }
+
+ fn provided_init_features(&self, _: &PublicKey) -> InitFeatures {
+ self.features.clone()
+ }
+ }
+
fn create_peermgr_cfgs(peer_count: usize) -> Vec<PeerManagerCfg> {
let mut cfgs = Vec::new();
for i in 0..peer_count {
let node_secret = SecretKey::from_slice(&[42 + i as u8; 32]).unwrap();
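+ // Give every compatible peer the same extra feature bit: bit 0 of byte 32,
+ // i.e. feature bit 256 in the little-endian encoding below, so both ends of
+ // a connection advertise an identical custom feature.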
+ let features = {
+ let mut feature_bits = vec![0u8; 33];
+ feature_bits[32] = 0b00000001;
+ InitFeatures::from_le_bytes(feature_bits)
+ };
cfgs.push(
PeerManagerCfg{
chan_handler: test_utils::TestChannelMessageHandler::new(),
logger: test_utils::TestLogger::new(),
routing_handler: test_utils::TestRoutingMessageHandler::new(),
+ custom_handler: TestCustomMessageHandler { features },
node_signer: test_utils::TestNodeSigner::new(node_secret),
}
);
cfgs
}
- fn create_network<'a>(peer_count: usize, cfgs: &'a Vec<PeerManagerCfg>) -> Vec<PeerManager<FileDescriptor, &'a test_utils::TestChannelMessageHandler, &'a test_utils::TestRoutingMessageHandler, IgnoringMessageHandler, &'a test_utils::TestLogger, IgnoringMessageHandler, &'a test_utils::TestNodeSigner>> {
+ fn create_incompatible_peermgr_cfgs(peer_count: usize) -> Vec<PeerManagerCfg> {
+ let mut cfgs = Vec::new();
+ for i in 0..peer_count {
+ let node_secret = SecretKey::from_slice(&[42 + i as u8; 32]).unwrap();
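+ // Give each incompatible peer a distinct feature bit (bit 8 * (33 + i),
+ // which is even, i.e. required under BOLT 9's it's-OK-to-be-odd rule) that
+ // no other peer advertises, so handshakes with it must fail.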
+ let features = {
+ let mut feature_bits = vec![0u8; 33 + i + 1];
+ feature_bits[33 + i] = 0b00000001;
+ InitFeatures::from_le_bytes(feature_bits)
+ };
+ cfgs.push(
+ PeerManagerCfg{
+ chan_handler: test_utils::TestChannelMessageHandler::new(),
+ logger: test_utils::TestLogger::new(),
+ routing_handler: test_utils::TestRoutingMessageHandler::new(),
+ custom_handler: TestCustomMessageHandler { features },
+ node_signer: test_utils::TestNodeSigner::new(node_secret),
+ }
+ );
+ }
+
+ cfgs
+ }
+
+ fn create_network<'a>(peer_count: usize, cfgs: &'a Vec<PeerManagerCfg>) -> Vec<PeerManager<FileDescriptor, &'a test_utils::TestChannelMessageHandler, &'a test_utils::TestRoutingMessageHandler, IgnoringMessageHandler, &'a test_utils::TestLogger, &'a TestCustomMessageHandler, &'a test_utils::TestNodeSigner>> {
let mut peers = Vec::new();
for i in 0..peer_count {
let ephemeral_bytes = [i as u8; 32];
let msg_handler = MessageHandler {
chan_handler: &cfgs[i].chan_handler, route_handler: &cfgs[i].routing_handler,
- onion_message_handler: IgnoringMessageHandler {}, custom_message_handler: IgnoringMessageHandler {}
+ onion_message_handler: IgnoringMessageHandler {}, custom_message_handler: &cfgs[i].custom_handler
};
let peer = PeerManager::new(msg_handler, 0, &ephemeral_bytes, &cfgs[i].logger, &cfgs[i].node_signer);
peers.push(peer);
peers
}
- fn establish_connection<'a>(peer_a: &PeerManager<FileDescriptor, &'a test_utils::TestChannelMessageHandler, &'a test_utils::TestRoutingMessageHandler, IgnoringMessageHandler, &'a test_utils::TestLogger, IgnoringMessageHandler, &'a test_utils::TestNodeSigner>, peer_b: &PeerManager<FileDescriptor, &'a test_utils::TestChannelMessageHandler, &'a test_utils::TestRoutingMessageHandler, IgnoringMessageHandler, &'a test_utils::TestLogger, IgnoringMessageHandler, &'a test_utils::TestNodeSigner>) -> (FileDescriptor, FileDescriptor) {
+ fn establish_connection<'a>(peer_a: &PeerManager<FileDescriptor, &'a test_utils::TestChannelMessageHandler, &'a test_utils::TestRoutingMessageHandler, IgnoringMessageHandler, &'a test_utils::TestLogger, &'a TestCustomMessageHandler, &'a test_utils::TestNodeSigner>, peer_b: &PeerManager<FileDescriptor, &'a test_utils::TestChannelMessageHandler, &'a test_utils::TestRoutingMessageHandler, IgnoringMessageHandler, &'a test_utils::TestLogger, &'a TestCustomMessageHandler, &'a test_utils::TestNodeSigner>) -> (FileDescriptor, FileDescriptor) {
let id_a = peer_a.node_signer.get_node_id(Recipient::Node).unwrap();
let mut fd_a = FileDescriptor {
fd: 1, outbound_data: Arc::new(Mutex::new(Vec::new())),
thrd_b.join().unwrap();
}
+ #[test]
+ fn test_incompatible_peers() {
+ let cfgs = create_peermgr_cfgs(2);
+ let incompatible_cfgs = create_incompatible_peermgr_cfgs(2);
+
+ let peers = create_network(2, &cfgs);
+ let incompatible_peers = create_network(2, &incompatible_cfgs);
+ let peer_pairs = [(&peers[0], &incompatible_peers[0]), (&incompatible_peers[1], &peers[1])];
+ for (peer_a, peer_b) in peer_pairs.iter() {
+ let id_a = peer_a.node_signer.get_node_id(Recipient::Node).unwrap();
+ let mut fd_a = FileDescriptor {
+ fd: 1, outbound_data: Arc::new(Mutex::new(Vec::new())),
+ disconnect: Arc::new(AtomicBool::new(false)),
+ };
+ let addr_a = NetAddress::IPv4{addr: [127, 0, 0, 1], port: 1000};
+ let mut fd_b = FileDescriptor {
+ fd: 1, outbound_data: Arc::new(Mutex::new(Vec::new())),
+ disconnect: Arc::new(AtomicBool::new(false)),
+ };
+ let addr_b = NetAddress::IPv4{addr: [127, 0, 0, 1], port: 1001};
+ let initial_data = peer_b.new_outbound_connection(id_a, fd_b.clone(), Some(addr_a.clone())).unwrap();
+ peer_a.new_inbound_connection(fd_a.clone(), Some(addr_b.clone())).unwrap();
+ assert_eq!(peer_a.read_event(&mut fd_a, &initial_data).unwrap(), false);
+ peer_a.process_events();
+
+ let a_data = fd_a.outbound_data.lock().unwrap().split_off(0);
+ assert_eq!(peer_b.read_event(&mut fd_b, &a_data).unwrap(), false);
+
+ peer_b.process_events();
+ let b_data = fd_b.outbound_data.lock().unwrap().split_off(0);
+
+ // Should fail because of unknown required features
+ assert!(peer_a.read_event(&mut fd_a, &b_data).is_err());
+ }
+ }
+
#[test]
fn test_disconnect_peer() {
// Simple test which builds a network of PeerManager, connects and brings them to NoiseState::Finished and
// For (None)
assert_eq!(filter_addresses(None), None);
}
+
+ #[test]
+ #[cfg(feature = "std")]
+ fn test_process_events_multithreaded() {
+ use std::time::{Duration, Instant};
+ // Test that `process_events` getting called on multiple threads doesn't generate too many
+ // loop iterations.
+ // Each time `process_events` goes around the loop we call
+ // `get_and_clear_pending_msg_events`, which we count using the `TestMessageHandler`.
+ // Because the loop should go around once more after a call which fails to take the
+ // single-threaded lock, if we write zero to the counter before calling `process_events` we
+ // should never observe there having been more than 2 loop iterations.
+ // Further, because the last thread to exit will call `process_events` before returning, we
+ // should always have at least one count at the end.
+ let cfg = Arc::new(create_peermgr_cfgs(1));
+ // Until we have std::thread::scoped we have to unsafe { turn off the borrow checker }.
+ let peer = Arc::new(create_network(1, unsafe { &*(&*cfg as *const _) as &'static _ }).pop().unwrap());
+
+ let exit_flag = Arc::new(AtomicBool::new(false));
+ macro_rules! spawn_thread { () => { {
+ let thread_cfg = Arc::clone(&cfg);
+ let thread_peer = Arc::clone(&peer);
+ let thread_exit = Arc::clone(&exit_flag);
+ std::thread::spawn(move || {
+ while !thread_exit.load(Ordering::Acquire) {
+ thread_cfg[0].chan_handler.message_fetch_counter.store(0, Ordering::Release);
+ thread_peer.process_events();
+ std::thread::sleep(Duration::from_micros(1));
+ }
+ })
+ } } }
+
+ let thread_a = spawn_thread!();
+ let thread_b = spawn_thread!();
+ let thread_c = spawn_thread!();
+
+ let start_time = Instant::now();
+ while start_time.elapsed() < Duration::from_millis(100) {
+ let val = cfg[0].chan_handler.message_fetch_counter.load(Ordering::Acquire);
+ assert!(val <= 2);
+ std::thread::yield_now(); // Windows seemingly doesn't ever interrupt threads?!
+ }
+
+ exit_flag.store(true, Ordering::Release);
+ thread_a.join().unwrap();
+ thread_b.join().unwrap();
+ thread_c.join().unwrap();
+ assert!(cfg[0].chan_handler.message_fetch_counter.load(Ordering::Acquire) >= 1);
+ }
}
use crate::util::ser::Writeable;
use crate::util::string::UntrustedString;
-use bitcoin::blockdata::block::{Block, BlockHeader};
use bitcoin::blockdata::script::Builder;
use bitcoin::blockdata::opcodes;
use bitcoin::secp256k1::Secp256k1;
use crate::prelude::*;
-use bitcoin::hashes::Hash;
-use bitcoin::TxMerkleNode;
use crate::ln::functional_test_utils::*;
check_added_monitors!(nodes[2], 1);
get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
- let mut header = BlockHeader { version: 0x2000_0000, prev_blockhash: nodes[2].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
let claim_txn = if local_commitment {
// Broadcast node 1 commitment txn to broadcast the HTLC-Timeout
let node_1_commitment_txn = get_local_commitment_txn!(nodes[1], chan_2.2);
check_spends!(node_1_commitment_txn[1], node_1_commitment_txn[0]);
// Give node 2 node 1's transactions and get its response (claiming the HTLC instead).
- connect_block(&nodes[2], &Block { header, txdata: node_1_commitment_txn.clone() });
+ connect_block(&nodes[2], &create_dummy_block(nodes[2].best_block_hash(), 42, node_1_commitment_txn.clone()));
check_added_monitors!(nodes[2], 1);
check_closed_broadcast!(nodes[2], true); // We should get a BroadcastChannelUpdate (and *only* a BroadcastChannelUpdate)
check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed);
// Make sure node 1's height is the same as the !local_commitment case
connect_blocks(&nodes[1], 1);
// Confirm node 1's commitment txn (and HTLC-Timeout) on node 1
- header.prev_blockhash = nodes[1].best_block_hash();
- connect_block(&nodes[1], &Block { header, txdata: node_1_commitment_txn.clone() });
+ connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, node_1_commitment_txn.clone()));
// ...but return node 1's commitment tx in case claim is set and we're preparing to reorg
vec![node_1_commitment_txn[0].clone(), node_2_commitment_txn[0].clone()]
// Disconnect Node 1's HTLC-Timeout which was connected above
disconnect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
- let block = Block {
- header: BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 },
- txdata: claim_txn,
- };
- connect_block(&nodes[1], &block);
+ connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, claim_txn));
// ChannelManager only polls chain::Watch::release_pending_monitor_events when we
// probe it for events, so we probe non-message events here (which should just be the
expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), true, true);
} else {
// Confirm the timeout tx and check that we fail the HTLC backwards
- let block = Block {
- header: BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 },
- txdata: vec![],
- };
- connect_block(&nodes[1], &block);
+ connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, Vec::new()));
expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
}
if let Event::SpendableOutputs { outputs } = node_a_spendable.pop().unwrap() {
assert_eq!(outputs.len(), 1);
let spend_tx = nodes[0].keys_manager.backing.spend_spendable_outputs(&[&outputs[0]], Vec::new(),
- Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script(), 253, &Secp256k1::new()).unwrap();
+ Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script(), 253, None, &Secp256k1::new()).unwrap();
check_spends!(spend_tx, remote_txn_b[0]);
}
if let Event::SpendableOutputs { outputs } = node_b_spendable.pop().unwrap() {
assert_eq!(outputs.len(), 1);
let spend_tx = nodes[1].keys_manager.backing.spend_spendable_outputs(&[&outputs[0]], Vec::new(),
- Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script(), 253, &Secp256k1::new()).unwrap();
+ Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script(), 253, None, &Secp256k1::new()).unwrap();
check_spends!(spend_tx, remote_txn_a[0]);
}
}
let (_, payment_hash, payment_secret) = get_payment_preimage_hash!(nodes[0]);
let payment_params_1 = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV).with_bolt11_features(nodes[1].node.invoice_features()).unwrap();
- let route_1 = get_route(&nodes[0].node.get_our_node_id(), &payment_params_1, &nodes[0].network_graph.read_only(), None, 100000, &logger, &scorer, &random_seed_bytes).unwrap();
+ let route_1 = get_route(&nodes[0].node.get_our_node_id(), &payment_params_1, &nodes[0].network_graph.read_only(), None, 100000, &logger, &scorer, &(), &random_seed_bytes).unwrap();
let payment_params_2 = PaymentParameters::from_node_id(nodes[0].node.get_our_node_id(), TEST_FINAL_CLTV).with_bolt11_features(nodes[0].node.invoice_features()).unwrap();
- let route_2 = get_route(&nodes[1].node.get_our_node_id(), &payment_params_2, &nodes[1].network_graph.read_only(), None, 100000, &logger, &scorer, &random_seed_bytes).unwrap();
+ let route_2 = get_route(&nodes[1].node.get_our_node_id(), &payment_params_2, &nodes[1].network_graph.read_only(), None, 100000, &logger, &scorer, &(), &random_seed_bytes).unwrap();
unwrap_send_err!(nodes[0].node.send_payment_with_route(&route_1, payment_hash,
RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)
), true, APIError::ChannelUnavailable {..}, {});
Custom(T),
}
-impl<T> Message<T> where T: core::fmt::Debug + Type + TestEq {
+impl<T> Writeable for Message<T> where T: core::fmt::Debug + Type + TestEq {
+ fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
+ match self {
+ &Message::Init(ref msg) => msg.write(writer),
+ &Message::Error(ref msg) => msg.write(writer),
+ &Message::Warning(ref msg) => msg.write(writer),
+ &Message::Ping(ref msg) => msg.write(writer),
+ &Message::Pong(ref msg) => msg.write(writer),
+ &Message::OpenChannel(ref msg) => msg.write(writer),
+ &Message::OpenChannelV2(ref msg) => msg.write(writer),
+ &Message::AcceptChannel(ref msg) => msg.write(writer),
+ &Message::AcceptChannelV2(ref msg) => msg.write(writer),
+ &Message::FundingCreated(ref msg) => msg.write(writer),
+ &Message::FundingSigned(ref msg) => msg.write(writer),
+ &Message::TxAddInput(ref msg) => msg.write(writer),
+ &Message::TxAddOutput(ref msg) => msg.write(writer),
+ &Message::TxRemoveInput(ref msg) => msg.write(writer),
+ &Message::TxRemoveOutput(ref msg) => msg.write(writer),
+ &Message::TxComplete(ref msg) => msg.write(writer),
+ &Message::TxSignatures(ref msg) => msg.write(writer),
+ &Message::TxInitRbf(ref msg) => msg.write(writer),
+ &Message::TxAckRbf(ref msg) => msg.write(writer),
+ &Message::TxAbort(ref msg) => msg.write(writer),
+ &Message::ChannelReady(ref msg) => msg.write(writer),
+ &Message::Shutdown(ref msg) => msg.write(writer),
+ &Message::ClosingSigned(ref msg) => msg.write(writer),
+ &Message::OnionMessage(ref msg) => msg.write(writer),
+ &Message::UpdateAddHTLC(ref msg) => msg.write(writer),
+ &Message::UpdateFulfillHTLC(ref msg) => msg.write(writer),
+ &Message::UpdateFailHTLC(ref msg) => msg.write(writer),
+ &Message::UpdateFailMalformedHTLC(ref msg) => msg.write(writer),
+ &Message::CommitmentSigned(ref msg) => msg.write(writer),
+ &Message::RevokeAndACK(ref msg) => msg.write(writer),
+ &Message::UpdateFee(ref msg) => msg.write(writer),
+ &Message::ChannelReestablish(ref msg) => msg.write(writer),
+ &Message::AnnouncementSignatures(ref msg) => msg.write(writer),
+ &Message::ChannelAnnouncement(ref msg) => msg.write(writer),
+ &Message::NodeAnnouncement(ref msg) => msg.write(writer),
+ &Message::ChannelUpdate(ref msg) => msg.write(writer),
+ &Message::QueryShortChannelIds(ref msg) => msg.write(writer),
+ &Message::ReplyShortChannelIdsEnd(ref msg) => msg.write(writer),
+ &Message::QueryChannelRange(ref msg) => msg.write(writer),
+ &Message::ReplyChannelRange(ref msg) => msg.write(writer),
+ &Message::GossipTimestampFilter(ref msg) => msg.write(writer),
+ &Message::Unknown(_) => { Ok(()) },
+ &Message::Custom(ref msg) => msg.write(writer),
+ }
+ }
+}
+
+impl<T> Type for Message<T> where T: core::fmt::Debug + Type + TestEq {
/// Returns the type that was used to decode the message payload.
- pub fn type_id(&self) -> u16 {
+ fn type_id(&self) -> u16 {
match self {
&Message::Init(ref msg) => msg.type_id(),
&Message::Error(ref msg) => msg.type_id(),
&Message::Custom(ref msg) => msg.type_id(),
}
}
+}
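Moving `type_id` into a `Type` impl and adding `Writeable` means a fully wrapped `wire::Message` can be handed to any code generic over `Writeable + Type`, which the deferred-disconnect path above relies on when it stashes a wrapped `Error`/`Warning` message for later delivery. A self-contained sketch of that bound (stand-in traits, not LDK's exact ones):

use std::io;

trait MiniWriteable { fn write(&self, w: &mut Vec<u8>) -> io::Result<()>; }
trait MiniType { fn type_id(&self) -> u16; }

// Generic over both bounds, like an enqueue_message-style helper: frame the
// payload with its big-endian type prefix, then serialize the body after it.
fn frame_message<M: MiniWriteable + MiniType>(msg: &M) -> io::Result<Vec<u8>> {
	let mut buf = msg.type_id().to_be_bytes().to_vec();
	msg.write(&mut buf)?;
	Ok(buf)
}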
+impl<T> Message<T> where T: core::fmt::Debug + Type + TestEq {
/// Returns whether the message's type is even, indicating both endpoints must support it.
pub fn is_even(&self) -> bool {
(self.type_id() & 1) == 0
BIG_PACKET_HOP_DATA_LEN
} else { return Err(()) };
- Ok(onion_utils::construct_onion_message_packet::<_, _>(
- payloads, onion_keys, prng_seed, hop_data_len))
+ onion_utils::construct_onion_message_packet::<_, _>(
+ payloads, onion_keys, prng_seed, hop_data_len)
}
use crate::prelude::*;
use core::{cmp, fmt};
use core::convert::TryFrom;
-use crate::sync::{RwLock, RwLockReadGuard};
+use crate::sync::{RwLock, RwLockReadGuard, LockTestExt};
#[cfg(feature = "std")]
use core::sync::atomic::{AtomicUsize, Ordering};
use crate::sync::Mutex;
},
}
}
+
+ /// Gets the genesis hash for this network graph.
+ pub fn get_genesis_hash(&self) -> BlockHash {
+ self.genesis_hash
+ }
}
macro_rules! secp_verify_sig {
}
/// Fees for routing via a given channel or a node
-#[derive(Eq, PartialEq, Copy, Clone, Debug, Hash)]
+#[derive(Eq, PartialEq, Copy, Clone, Debug, Hash, Ord, PartialOrd)]
pub struct RoutingFees {
/// Flat routing fee in millisatoshis.
pub base_msat: u32,
impl<L: Deref> Eq for NetworkGraph<L> where L::Target: Logger {}
impl<L: Deref> PartialEq for NetworkGraph<L> where L::Target: Logger {
fn eq(&self, other: &Self) -> bool {
- self.genesis_hash == other.genesis_hash &&
- *self.channels.read().unwrap() == *other.channels.read().unwrap() &&
- *self.nodes.read().unwrap() == *other.nodes.read().unwrap()
+ // For a total lockorder, sort by position in memory and take the inner locks in that order.
+ // (Assumes that we can't move within memory while a lock is held).
+ let ord = ((self as *const _) as usize) < ((other as *const _) as usize);
+ let a = if ord { (&self.channels, &self.nodes) } else { (&other.channels, &other.nodes) };
+ let b = if ord { (&other.channels, &other.nodes) } else { (&self.channels, &self.nodes) };
+ let (channels_a, channels_b) = (a.0.unsafe_well_ordered_double_lock_self(), b.0.unsafe_well_ordered_double_lock_self());
+ let (nodes_a, nodes_b) = (a.1.unsafe_well_ordered_double_lock_self(), b.1.unsafe_well_ordered_double_lock_self());
+ self.genesis_hash.eq(&other.genesis_hash) && channels_a.eq(&channels_b) && nodes_a.eq(&nodes_b)
}
}
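Locking the two graphs in address order gives every thread the same total order over the pair, so concurrent `a == b` and `b == a` comparisons cannot deadlock on each other's inner locks. A minimal sketch of the same trick with plain `std` mutexes (the `unsafe_well_ordered_double_lock_self` helper above is LDK's equivalent that also cooperates with its lockorder test infrastructure):

use std::sync::Mutex;

struct Graph { nodes: Mutex<Vec<u64>> }

fn graph_eq(a: &Graph, b: &Graph) -> bool {
	// Take the instance at the lower memory address first; both threads in a
	// concurrent a == b / b == a pair then agree on the acquisition order.
	let (first, second) = if (a as *const Graph as usize) < (b as *const Graph as usize) {
		(a, b)
	} else {
		(b, a)
	};
	let first_nodes = first.nodes.lock().unwrap();
	let second_nodes = second.nodes.lock().unwrap();
	// Equality is symmetric, so comparing in either order gives the same answer.
	*first_nodes == *second_nodes
}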
}
}
-#[cfg(all(test, feature = "_bench_unstable"))]
-mod benches {
+#[cfg(ldk_bench)]
+pub mod benches {
use super::*;
-
- use test::Bencher;
use std::io::Read;
+ use criterion::{black_box, Criterion};
- #[bench]
- fn read_network_graph(bench: &mut Bencher) {
+ pub fn read_network_graph(bench: &mut Criterion) {
let logger = crate::util::test_utils::TestLogger::new();
let mut d = crate::routing::router::bench_utils::get_route_file().unwrap();
let mut v = Vec::new();
d.read_to_end(&mut v).unwrap();
- bench.iter(|| {
- let _ = NetworkGraph::read(&mut std::io::Cursor::new(&v), &logger).unwrap();
- });
+ bench.bench_function("read_network_graph", |b| b.iter(||
+ NetworkGraph::read(&mut std::io::Cursor::new(black_box(&v)), &logger).unwrap()
+ ));
}
- #[bench]
- fn write_network_graph(bench: &mut Bencher) {
+ pub fn write_network_graph(bench: &mut Criterion) {
let logger = crate::util::test_utils::TestLogger::new();
let mut d = crate::routing::router::bench_utils::get_route_file().unwrap();
let net_graph = NetworkGraph::read(&mut d, &logger).unwrap();
- bench.iter(|| {
- let _ = net_graph.encode();
- });
+ bench.bench_function("write_network_graph", |b| b.iter(||
+ black_box(&net_graph).encode()
+ ));
}
}
use crate::io;
use crate::prelude::*;
-use crate::sync::Mutex;
+use crate::sync::{Mutex, MutexGuard};
use alloc::collections::BinaryHeap;
use core::{cmp, fmt};
use core::ops::Deref;
/// A [`Router`] implemented using [`find_route`].
-pub struct DefaultRouter<G: Deref<Target = NetworkGraph<L>>, L: Deref, S: Deref> where
+pub struct DefaultRouter<G: Deref<Target = NetworkGraph<L>>, L: Deref, S: Deref, SP: Sized, Sc: Score<ScoreParams = SP>> where
L::Target: Logger,
- S::Target: for <'a> LockableScore<'a>,
+ S::Target: for <'a> LockableScore<'a, Locked = MutexGuard<'a, Sc>>,
{
network_graph: G,
logger: L,
random_seed_bytes: Mutex<[u8; 32]>,
- scorer: S
+ scorer: S,
+ score_params: SP
}
-impl<G: Deref<Target = NetworkGraph<L>>, L: Deref, S: Deref> DefaultRouter<G, L, S> where
+impl<G: Deref<Target = NetworkGraph<L>>, L: Deref, S: Deref, SP: Sized, Sc: Score<ScoreParams = SP>> DefaultRouter<G, L, S, SP, Sc> where
L::Target: Logger,
- S::Target: for <'a> LockableScore<'a>,
+ S::Target: for <'a> LockableScore<'a, Locked = MutexGuard<'a, Sc>>,
{
/// Creates a new router.
- pub fn new(network_graph: G, logger: L, random_seed_bytes: [u8; 32], scorer: S) -> Self {
+ pub fn new(network_graph: G, logger: L, random_seed_bytes: [u8; 32], scorer: S, score_params: SP) -> Self {
let random_seed_bytes = Mutex::new(random_seed_bytes);
- Self { network_graph, logger, random_seed_bytes, scorer }
+ Self { network_graph, logger, random_seed_bytes, scorer, score_params }
}
}
-impl<G: Deref<Target = NetworkGraph<L>>, L: Deref, S: Deref> Router for DefaultRouter<G, L, S> where
+impl<G: Deref<Target = NetworkGraph<L>>, L: Deref, S: Deref, SP: Sized, Sc: Score<ScoreParams = SP>> Router for DefaultRouter<G, L, S, SP, Sc> where
L::Target: Logger,
- S::Target: for <'a> LockableScore<'a>,
+ S::Target: for <'a> LockableScore<'a, Locked = MutexGuard<'a, Sc>>,
{
fn find_route(
- &self, payer: &PublicKey, params: &RouteParameters, first_hops: Option<&[&ChannelDetails]>,
+ &self,
+ payer: &PublicKey,
+ params: &RouteParameters,
+ first_hops: Option<&[&ChannelDetails]>,
inflight_htlcs: &InFlightHtlcs
) -> Result<Route, LightningError> {
let random_seed_bytes = {
*locked_random_seed_bytes = Sha256::hash(&*locked_random_seed_bytes).into_inner();
*locked_random_seed_bytes
};
-
find_route(
payer, params, &self.network_graph, first_hops, &*self.logger,
&ScorerAccountingForInFlightHtlcs::new(self.scorer.lock(), inflight_htlcs),
+ &self.score_params,
&random_seed_bytes
)
}
}
impl<'a, S: Score> Score for ScorerAccountingForInFlightHtlcs<'a, S> {
- fn channel_penalty_msat(&self, short_channel_id: u64, source: &NodeId, target: &NodeId, usage: ChannelUsage) -> u64 {
+ type ScoreParams = S::ScoreParams;
+ fn channel_penalty_msat(&self, short_channel_id: u64, source: &NodeId, target: &NodeId, usage: ChannelUsage, score_params: &Self::ScoreParams) -> u64 {
if let Some(used_liquidity) = self.inflight_htlcs.used_liquidity_msat(
source, target, short_channel_id
) {
..usage
};
- self.scorer.channel_penalty_msat(short_channel_id, source, target, usage)
+ self.scorer.channel_penalty_msat(short_channel_id, source, target, usage, score_params)
} else {
- self.scorer.channel_penalty_msat(short_channel_id, source, target, usage)
+ self.scorer.channel_penalty_msat(short_channel_id, source, target, usage, score_params)
}
}
}
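Threading a `ScoreParams` associated type through `channel_penalty_msat` (as above) lets callers pick scoring parameters per routing attempt instead of baking them into the scorer itself. A simplified sketch of the pattern using a stand-in trait, not LDK's full `Score` trait (which also has path-result callbacks omitted here):

// Stand-in for the parameterized-scorer pattern; not LDK's actual trait.
trait MiniScore {
	type ScoreParams;
	fn channel_penalty_msat(&self, short_channel_id: u64, params: &Self::ScoreParams) -> u64;
}

struct FlatScorer;
struct FlatParams { base_penalty_msat: u64 }

impl MiniScore for FlatScorer {
	type ScoreParams = FlatParams;
	fn channel_penalty_msat(&self, _scid: u64, params: &Self::ScoreParams) -> u64 {
		// Every channel gets the caller-chosen flat penalty.
		params.base_penalty_msat
	}
}

// Usage: the same scorer, different parameters per call.
// let s = FlatScorer;
// assert_eq!(s.channel_penalty_msat(42, &FlatParams { base_penalty_msat: 500 }), 500);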
/// A list of hops along a payment path terminating with a channel to the recipient.
-#[derive(Clone, Debug, Hash, Eq, PartialEq)]
+#[derive(Clone, Debug, Hash, Eq, PartialEq, Ord, PartialOrd)]
pub struct RouteHint(pub Vec<RouteHintHop>);
impl Writeable for RouteHint {
}
/// A channel descriptor for a hop along a payment path.
-#[derive(Clone, Debug, Hash, Eq, PartialEq)]
+#[derive(Clone, Debug, Hash, Eq, PartialEq, Ord, PartialOrd)]
pub struct RouteHintHop {
/// The node_id of the non-target end of the route
pub src_node_id: PublicKey,
/// decrease as well. Thus, we have to explicitly track which nodes have been processed and
/// avoid processing them again.
was_processed: bool,
- #[cfg(all(not(feature = "_bench_unstable"), any(test, fuzzing)))]
+ #[cfg(all(not(ldk_bench), any(test, fuzzing)))]
// In tests, we apply further sanity checks on cases where we skip nodes we already processed
// to ensure it is specifically in cases where the fee has gone down because of a decrease in
// value_contribution_msat, which requires tracking it here. See comments below where it is
.field("path_penalty_msat", &self.path_penalty_msat)
.field("path_htlc_minimum_msat", &self.path_htlc_minimum_msat)
.field("cltv_expiry_delta", &self.candidate.cltv_expiry_delta());
- #[cfg(all(not(feature = "_bench_unstable"), any(test, fuzzing)))]
+ #[cfg(all(not(ldk_bench), any(test, fuzzing)))]
let debug_struct = debug_struct
.field("value_contribution_msat", &self.value_contribution_msat);
debug_struct.finish()
pub fn find_route<L: Deref, GL: Deref, S: Score>(
our_node_pubkey: &PublicKey, route_params: &RouteParameters,
network_graph: &NetworkGraph<GL>, first_hops: Option<&[&ChannelDetails]>, logger: L,
- scorer: &S, random_seed_bytes: &[u8; 32]
+ scorer: &S, score_params: &S::ScoreParams, random_seed_bytes: &[u8; 32]
) -> Result<Route, LightningError>
where L::Target: Logger, GL::Target: Logger {
let graph_lock = network_graph.read_only();
let mut route = get_route(our_node_pubkey, &route_params.payment_params, &graph_lock, first_hops,
- route_params.final_value_msat, logger, scorer,
+ route_params.final_value_msat, logger, scorer, score_params,
random_seed_bytes)?;
add_random_cltv_offset(&mut route, &route_params.payment_params, &graph_lock, random_seed_bytes);
Ok(route)
pub(crate) fn get_route<L: Deref, S: Score>(
our_node_pubkey: &PublicKey, payment_params: &PaymentParameters, network_graph: &ReadOnlyNetworkGraph,
- first_hops: Option<&[&ChannelDetails]>, final_value_msat: u64, logger: L, scorer: &S,
+ first_hops: Option<&[&ChannelDetails]>, final_value_msat: u64, logger: L, scorer: &S, score_params: &S::ScoreParams,
_random_seed_bytes: &[u8; 32]
) -> Result<Route, LightningError>
where L::Target: Logger {
path_htlc_minimum_msat,
path_penalty_msat: u64::max_value(),
was_processed: false,
- #[cfg(all(not(feature = "_bench_unstable"), any(test, fuzzing)))]
+ #[cfg(all(not(ldk_bench), any(test, fuzzing)))]
value_contribution_msat,
}
});
#[allow(unused_mut)] // We only use the mut in cfg(test)
let mut should_process = !old_entry.was_processed;
- #[cfg(all(not(feature = "_bench_unstable"), any(test, fuzzing)))]
+ #[cfg(all(not(ldk_bench), any(test, fuzzing)))]
{
// In test/fuzzing builds, we do extra checks to make sure the skipping
// of already-seen nodes only happens in cases we expect (see below).
effective_capacity,
};
let channel_penalty_msat = scorer.channel_penalty_msat(
- short_channel_id, &$src_node_id, &$dest_node_id, channel_usage
+ short_channel_id, &$src_node_id, &$dest_node_id, channel_usage, score_params
);
let path_penalty_msat = $next_hops_path_penalty_msat
.saturating_add(channel_penalty_msat);
old_entry.fee_msat = 0; // This value will be later filled with hop_use_fee_msat of the following channel
old_entry.path_htlc_minimum_msat = path_htlc_minimum_msat;
old_entry.path_penalty_msat = path_penalty_msat;
- #[cfg(all(not(feature = "_bench_unstable"), any(test, fuzzing)))]
+ #[cfg(all(not(ldk_bench), any(test, fuzzing)))]
{
old_entry.value_contribution_msat = value_contribution_msat;
}
did_add_update_path_to_src_node = true;
} else if old_entry.was_processed && new_cost < old_cost {
- #[cfg(all(not(feature = "_bench_unstable"), any(test, fuzzing)))]
+ #[cfg(all(not(ldk_bench), any(test, fuzzing)))]
{
// If we're skipping processing a node which was previously
// processed even though we found another path to it with a
effective_capacity: candidate.effective_capacity(),
};
let channel_penalty_msat = scorer.channel_penalty_msat(
- hop.short_channel_id, &source, &target, channel_usage
+ hop.short_channel_id, &source, &target, channel_usage, score_params
);
aggregate_next_hops_path_penalty_msat = aggregate_next_hops_path_penalty_msat
.saturating_add(channel_penalty_msat);
}
impl Score for HopScorer {
+ type ScoreParams = ();
fn channel_penalty_msat(&self, _short_channel_id: u64, source: &NodeId, target: &NodeId,
- _usage: ChannelUsage) -> u64
+ _usage: ChannelUsage, _score_params: &Self::ScoreParams) -> u64
{
let mut cur_id = self.our_node_id;
for i in 0..self.hop_ids.len() {
let scorer = HopScorer { our_node_id, hop_ids };
get_route(our_node_pubkey, payment_params, network_graph, None, final_value_msat,
- logger, &scorer, random_seed_bytes)
+ logger, &scorer, &(), random_seed_bytes)
}
#[cfg(test)]
use crate::routing::router::{get_route, build_route_from_hops_internal, add_random_cltv_offset, default_node_features,
BlindedTail, InFlightHtlcs, Path, PaymentParameters, Route, RouteHint, RouteHintHop, RouteHop, RoutingFees,
DEFAULT_MAX_TOTAL_CLTV_EXPIRY_DELTA, MAX_PATH_LENGTH_ESTIMATE};
- use crate::routing::scoring::{ChannelUsage, FixedPenaltyScorer, Score, ProbabilisticScorer, ProbabilisticScoringParameters};
+ use crate::routing::scoring::{ChannelUsage, FixedPenaltyScorer, Score, ProbabilisticScorer, ProbabilisticScoringFeeParameters, ProbabilisticScoringDecayParameters};
use crate::routing::test_utils::{add_channel, add_or_update_node, build_graph, build_line_graph, id_to_feature_flags, get_nodes, update_channel};
use crate::chain::transaction::OutPoint;
use crate::sign::EntropySource;
// Simple route to 2 via 1
- if let Err(LightningError{err, action: ErrorAction::IgnoreError}) = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 0, Arc::clone(&logger), &scorer, &random_seed_bytes) {
+ if let Err(LightningError{err, action: ErrorAction::IgnoreError}) = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 0, Arc::clone(&logger), &scorer, &(), &random_seed_bytes) {
assert_eq!(err, "Cannot send a payment of 0 msat");
} else { panic!(); }
- let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 100, Arc::clone(&logger), &scorer, &random_seed_bytes).unwrap();
+ let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 100, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
assert_eq!(route.paths[0].hops.len(), 2);
assert_eq!(route.paths[0].hops[0].pubkey, nodes[1]);
let our_chans = vec![get_channel_details(Some(2), our_id, InitFeatures::from_le_bytes(vec![0b11]), 100000)];
if let Err(LightningError{err, action: ErrorAction::IgnoreError}) =
- get_route(&our_id, &payment_params, &network_graph.read_only(), Some(&our_chans.iter().collect::<Vec<_>>()), 100, Arc::clone(&logger), &scorer, &random_seed_bytes) {
+ get_route(&our_id, &payment_params, &network_graph.read_only(), Some(&our_chans.iter().collect::<Vec<_>>()), 100, Arc::clone(&logger), &scorer, &(), &random_seed_bytes) {
assert_eq!(err, "First hop cannot have our_node_pubkey as a destination.");
} else { panic!(); }
- let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 100, Arc::clone(&logger), &scorer, &random_seed_bytes).unwrap();
+ let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 100, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
assert_eq!(route.paths[0].hops.len(), 2);
}
});
// Not possible to send 199_999_999, because the minimum on channel=2 is 200_000_000.
- if let Err(LightningError{err, action: ErrorAction::IgnoreError}) = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 199_999_999, Arc::clone(&logger), &scorer, &random_seed_bytes) {
+ if let Err(LightningError{err, action: ErrorAction::IgnoreError}) = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 199_999_999, Arc::clone(&logger), &scorer, &(), &random_seed_bytes) {
assert_eq!(err, "Failed to find a path to the given destination");
} else { panic!(); }
});
// A payment above the minimum should pass
- let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 199_999_999, Arc::clone(&logger), &scorer, &random_seed_bytes).unwrap();
+ let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 199_999_999, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
assert_eq!(route.paths[0].hops.len(), 2);
}
excess_data: Vec::new()
});
- let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 60_000, Arc::clone(&logger), &scorer, &random_seed_bytes).unwrap();
+ let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 60_000, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
// Overpay fees to hit htlc_minimum_msat.
let overpaid_fees = route.paths[0].hops[0].fee_msat + route.paths[1].hops[0].fee_msat;
// TODO: this could be better balanced to overpay 10k and not 15k.
excess_data: Vec::new()
});
- let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 60_000, Arc::clone(&logger), &scorer, &random_seed_bytes).unwrap();
+ let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 60_000, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
// Fine to overpay for htlc_minimum_msat if it allows us to save on fees.
assert_eq!(route.paths.len(), 1);
assert_eq!(route.paths[0].hops[0].short_channel_id, 12);
let fees = route.paths[0].hops[0].fee_msat;
assert_eq!(fees, 5_000);
- let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 50_000, Arc::clone(&logger), &scorer, &random_seed_bytes).unwrap();
+ let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 50_000, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
// Not fine to overpay for htlc_minimum_msat if it requires paying more than the fee on
// the other channel.
assert_eq!(route.paths.len(), 1);
});
// If all the channels require some features we don't understand, route should fail
- if let Err(LightningError{err, action: ErrorAction::IgnoreError}) = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 100, Arc::clone(&logger), &scorer, &random_seed_bytes) {
+ if let Err(LightningError{err, action: ErrorAction::IgnoreError}) = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 100, Arc::clone(&logger), &scorer, &(), &random_seed_bytes) {
assert_eq!(err, "Failed to find a path to the given destination");
} else { panic!(); }
// If we specify a channel to node7, that overrides our local channel view and that gets used
let our_chans = vec![get_channel_details(Some(42), nodes[7].clone(), InitFeatures::from_le_bytes(vec![0b11]), 250_000_000)];
- let route = get_route(&our_id, &payment_params, &network_graph.read_only(), Some(&our_chans.iter().collect::<Vec<_>>()), 100, Arc::clone(&logger), &scorer, &random_seed_bytes).unwrap();
+ let route = get_route(&our_id, &payment_params, &network_graph.read_only(), Some(&our_chans.iter().collect::<Vec<_>>()), 100, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
assert_eq!(route.paths[0].hops.len(), 2);
assert_eq!(route.paths[0].hops[0].pubkey, nodes[7]);
add_or_update_node(&gossip_sync, &secp_ctx, &privkeys[7], unknown_features.clone(), 1);
// If all nodes require some features we don't understand, route should fail
- if let Err(LightningError{err, action: ErrorAction::IgnoreError}) = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 100, Arc::clone(&logger), &scorer, &random_seed_bytes) {
+ if let Err(LightningError{err, action: ErrorAction::IgnoreError}) = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 100, Arc::clone(&logger), &scorer, &(), &random_seed_bytes) {
assert_eq!(err, "Failed to find a path to the given destination");
} else { panic!(); }
// If we specify a channel to node7, that overrides our local channel view and that gets used
let our_chans = vec![get_channel_details(Some(42), nodes[7].clone(), InitFeatures::from_le_bytes(vec![0b11]), 250_000_000)];
- let route = get_route(&our_id, &payment_params, &network_graph.read_only(), Some(&our_chans.iter().collect::<Vec<_>>()), 100, Arc::clone(&logger), &scorer, &random_seed_bytes).unwrap();
+ let route = get_route(&our_id, &payment_params, &network_graph.read_only(), Some(&our_chans.iter().collect::<Vec<_>>()), 100, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
assert_eq!(route.paths[0].hops.len(), 2);
assert_eq!(route.paths[0].hops[0].pubkey, nodes[7]);
// Route to 1 via 2 and 3 because our channel to 1 is disabled
let payment_params = PaymentParameters::from_node_id(nodes[0], 42);
- let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 100, Arc::clone(&logger), &scorer, &random_seed_bytes).unwrap();
+ let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 100, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
assert_eq!(route.paths[0].hops.len(), 3);
assert_eq!(route.paths[0].hops[0].pubkey, nodes[1]);
// If we specify a channel to node7, that overrides our local channel view and that gets used
let payment_params = PaymentParameters::from_node_id(nodes[2], 42);
let our_chans = vec![get_channel_details(Some(42), nodes[7].clone(), InitFeatures::from_le_bytes(vec![0b11]), 250_000_000)];
- let route = get_route(&our_id, &payment_params, &network_graph.read_only(), Some(&our_chans.iter().collect::<Vec<_>>()), 100, Arc::clone(&logger), &scorer, &random_seed_bytes).unwrap();
+ let route = get_route(&our_id, &payment_params, &network_graph.read_only(), Some(&our_chans.iter().collect::<Vec<_>>()), 100, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
assert_eq!(route.paths[0].hops.len(), 2);
assert_eq!(route.paths[0].hops[0].pubkey, nodes[7]);
invalid_last_hops.push(invalid_last_hop);
{
let payment_params = PaymentParameters::from_node_id(nodes[6], 42).with_route_hints(invalid_last_hops).unwrap();
- if let Err(LightningError{err, action: ErrorAction::IgnoreError}) = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 100, Arc::clone(&logger), &scorer, &random_seed_bytes) {
+ if let Err(LightningError{err, action: ErrorAction::IgnoreError}) = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 100, Arc::clone(&logger), &scorer, &(), &random_seed_bytes) {
assert_eq!(err, "Route hint cannot have the payee as the source.");
} else { panic!(); }
}
let payment_params = PaymentParameters::from_node_id(nodes[6], 42).with_route_hints(last_hops_multi_private_channels(&nodes)).unwrap();
- let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 100, Arc::clone(&logger), &scorer, &random_seed_bytes).unwrap();
+ let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 100, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
assert_eq!(route.paths[0].hops.len(), 5);
assert_eq!(route.paths[0].hops[0].pubkey, nodes[1]);
// Test handling of an empty RouteHint passed in an Invoice.
- let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 100, Arc::clone(&logger), &scorer, &random_seed_bytes).unwrap();
+ let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 100, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
assert_eq!(route.paths[0].hops.len(), 5);
assert_eq!(route.paths[0].hops[0].pubkey, nodes[1]);
excess_data: Vec::new()
});
- let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 100, Arc::clone(&logger), &scorer, &random_seed_bytes).unwrap();
+ let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 100, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
assert_eq!(route.paths[0].hops.len(), 4);
assert_eq!(route.paths[0].hops[0].pubkey, nodes[1]);
excess_data: Vec::new()
});
- let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 100, Arc::clone(&logger), &scorer, &[42u8; 32]).unwrap();
+ let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 100, Arc::clone(&logger), &scorer, &(), &[42u8; 32]).unwrap();
assert_eq!(route.paths[0].hops.len(), 4);
assert_eq!(route.paths[0].hops[0].pubkey, nodes[1]);
// This test shows that public routes can be present in the invoice
// and are then handled in the same manner.
- let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 100, Arc::clone(&logger), &scorer, &random_seed_bytes).unwrap();
+ let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 100, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
assert_eq!(route.paths[0].hops.len(), 5);
assert_eq!(route.paths[0].hops[0].pubkey, nodes[1]);
let our_chans = vec![get_channel_details(Some(42), nodes[3].clone(), InitFeatures::from_le_bytes(vec![0b11]), 250_000_000)];
let mut last_hops = last_hops(&nodes);
let payment_params = PaymentParameters::from_node_id(nodes[6], 42).with_route_hints(last_hops.clone()).unwrap();
- let route = get_route(&our_id, &payment_params, &network_graph.read_only(), Some(&our_chans.iter().collect::<Vec<_>>()), 100, Arc::clone(&logger), &scorer, &random_seed_bytes).unwrap();
+ let route = get_route(&our_id, &payment_params, &network_graph.read_only(), Some(&our_chans.iter().collect::<Vec<_>>()), 100, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
assert_eq!(route.paths[0].hops.len(), 2);
assert_eq!(route.paths[0].hops[0].pubkey, nodes[3]);
// Revert to via 6 as the fee on 8 goes up
let payment_params = PaymentParameters::from_node_id(nodes[6], 42).with_route_hints(last_hops).unwrap();
- let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 100, Arc::clone(&logger), &scorer, &random_seed_bytes).unwrap();
+ let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 100, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
assert_eq!(route.paths[0].hops.len(), 4);
assert_eq!(route.paths[0].hops[0].pubkey, nodes[1]);
assert_eq!(route.paths[0].hops[3].channel_features.le_flags(), &Vec::<u8>::new()); // We can't learn any flags from invoices, sadly
// ...but still use 8 for larger payments as 6 has a variable feerate
- let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 2000, Arc::clone(&logger), &scorer, &random_seed_bytes).unwrap();
+ let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 2000, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
assert_eq!(route.paths[0].hops.len(), 5);
assert_eq!(route.paths[0].hops[0].pubkey, nodes[1]);
let logger = ln_test_utils::TestLogger::new();
let network_graph = NetworkGraph::new(Network::Testnet, &logger);
let route = get_route(&source_node_id, &payment_params, &network_graph.read_only(),
- Some(&our_chans.iter().collect::<Vec<_>>()), route_val, &logger, &scorer, &random_seed_bytes);
+ Some(&our_chans.iter().collect::<Vec<_>>()), route_val, &logger, &scorer, &(), &random_seed_bytes);
route
}
{
// Attempt to route more than available results in a failure.
if let Err(LightningError{err, action: ErrorAction::IgnoreError}) = get_route(
- &our_id, &payment_params, &network_graph.read_only(), None, 250_000_001, Arc::clone(&logger), &scorer, &random_seed_bytes) {
+ &our_id, &payment_params, &network_graph.read_only(), None, 250_000_001, Arc::clone(&logger), &scorer, &(), &random_seed_bytes) {
assert_eq!(err, "Failed to find a sufficient route to the given destination");
} else { panic!(); }
}
{
// Now, attempting to route the exact amount we have should be fine.
- let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 250_000_000, Arc::clone(&logger), &scorer, &random_seed_bytes).unwrap();
+ let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 250_000_000, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
assert_eq!(route.paths.len(), 1);
let path = route.paths.last().unwrap();
assert_eq!(path.hops.len(), 2);
{
// Attempt to route more than available results in a failure.
if let Err(LightningError{err, action: ErrorAction::IgnoreError}) = get_route(
- &our_id, &payment_params, &network_graph.read_only(), Some(&our_chans.iter().collect::<Vec<_>>()), 200_000_001, Arc::clone(&logger), &scorer, &random_seed_bytes) {
+ &our_id, &payment_params, &network_graph.read_only(), Some(&our_chans.iter().collect::<Vec<_>>()), 200_000_001, Arc::clone(&logger), &scorer, &(), &random_seed_bytes) {
assert_eq!(err, "Failed to find a sufficient route to the given destination");
} else { panic!(); }
}
{
// Now, attempting to route the exact amount we have should be fine.
- let route = get_route(&our_id, &payment_params, &network_graph.read_only(), Some(&our_chans.iter().collect::<Vec<_>>()), 200_000_000, Arc::clone(&logger), &scorer, &random_seed_bytes).unwrap();
+ let route = get_route(&our_id, &payment_params, &network_graph.read_only(), Some(&our_chans.iter().collect::<Vec<_>>()), 200_000_000, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
assert_eq!(route.paths.len(), 1);
let path = route.paths.last().unwrap();
assert_eq!(path.hops.len(), 2);
{
// Attempt to route more than available results in a failure.
if let Err(LightningError{err, action: ErrorAction::IgnoreError}) = get_route(
- &our_id, &payment_params, &network_graph.read_only(), None, 15_001, Arc::clone(&logger), &scorer, &random_seed_bytes) {
+ &our_id, &payment_params, &network_graph.read_only(), None, 15_001, Arc::clone(&logger), &scorer, &(), &random_seed_bytes) {
assert_eq!(err, "Failed to find a sufficient route to the given destination");
} else { panic!(); }
}
{
// Now, attempting to route the exact amount we have should be fine.
- let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 15_000, Arc::clone(&logger), &scorer, &random_seed_bytes).unwrap();
+ let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 15_000, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
assert_eq!(route.paths.len(), 1);
let path = route.paths.last().unwrap();
assert_eq!(path.hops.len(), 2);
{
// Attempt to route more than available results in a failure.
if let Err(LightningError{err, action: ErrorAction::IgnoreError}) = get_route(
- &our_id, &payment_params, &network_graph.read_only(), None, 15_001, Arc::clone(&logger), &scorer, &random_seed_bytes) {
+ &our_id, &payment_params, &network_graph.read_only(), None, 15_001, Arc::clone(&logger), &scorer, &(), &random_seed_bytes) {
assert_eq!(err, "Failed to find a sufficient route to the given destination");
} else { panic!(); }
}
{
// Now, attempting to route the exact amount we have should be fine.
- let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 15_000, Arc::clone(&logger), &scorer, &random_seed_bytes).unwrap();
+ let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 15_000, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
assert_eq!(route.paths.len(), 1);
let path = route.paths.last().unwrap();
assert_eq!(path.hops.len(), 2);
{
// Attempt to route more than available results in a failure.
if let Err(LightningError{err, action: ErrorAction::IgnoreError}) = get_route(
- &our_id, &payment_params, &network_graph.read_only(), None, 10_001, Arc::clone(&logger), &scorer, &random_seed_bytes) {
+ &our_id, &payment_params, &network_graph.read_only(), None, 10_001, Arc::clone(&logger), &scorer, &(), &random_seed_bytes) {
assert_eq!(err, "Failed to find a sufficient route to the given destination");
} else { panic!(); }
}
{
// Now, attempting to route the exact amount we have should be fine.
- let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 10_000, Arc::clone(&logger), &scorer, &random_seed_bytes).unwrap();
+ let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 10_000, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
assert_eq!(route.paths.len(), 1);
let path = route.paths.last().unwrap();
assert_eq!(path.hops.len(), 2);
{
// Attempt to route more than available results in a failure.
if let Err(LightningError{err, action: ErrorAction::IgnoreError}) = get_route(
- &our_id, &payment_params, &network_graph.read_only(), None, 60_000, Arc::clone(&logger), &scorer, &random_seed_bytes) {
+ &our_id, &payment_params, &network_graph.read_only(), None, 60_000, Arc::clone(&logger), &scorer, &(), &random_seed_bytes) {
assert_eq!(err, "Failed to find a sufficient route to the given destination");
} else { panic!(); }
}
{
// Now, attempt to route 49 sats (just a bit below the capacity).
- let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 49_000, Arc::clone(&logger), &scorer, &random_seed_bytes).unwrap();
+ let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 49_000, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
assert_eq!(route.paths.len(), 1);
let mut total_amount_paid_msat = 0;
for path in &route.paths {
{
// Attempting to route an exact amount is also fine
- let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 50_000, Arc::clone(&logger), &scorer, &random_seed_bytes).unwrap();
+ let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 50_000, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
assert_eq!(route.paths.len(), 1);
let mut total_amount_paid_msat = 0;
for path in &route.paths {
});
{
- let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 50_000, Arc::clone(&logger), &scorer, &random_seed_bytes).unwrap();
+ let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 50_000, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
assert_eq!(route.paths.len(), 1);
let mut total_amount_paid_msat = 0;
for path in &route.paths {
// Attempt to route more than available results in a failure.
if let Err(LightningError{err, action: ErrorAction::IgnoreError}) = get_route(
&our_id, &payment_params, &network_graph.read_only(), None, 300_000,
- Arc::clone(&logger), &scorer, &random_seed_bytes) {
+ Arc::clone(&logger), &scorer, &(), &random_seed_bytes) {
assert_eq!(err, "Failed to find a sufficient route to the given destination");
} else { panic!(); }
}
let zero_payment_params = payment_params.clone().with_max_path_count(0);
if let Err(LightningError{err, action: ErrorAction::IgnoreError}) = get_route(
&our_id, &zero_payment_params, &network_graph.read_only(), None, 100,
- Arc::clone(&logger), &scorer, &random_seed_bytes) {
+ Arc::clone(&logger), &scorer, &(), &random_seed_bytes) {
assert_eq!(err, "Can't find a route with no paths allowed.");
} else { panic!(); }
}
let fail_payment_params = payment_params.clone().with_max_path_count(3);
if let Err(LightningError{err, action: ErrorAction::IgnoreError}) = get_route(
&our_id, &fail_payment_params, &network_graph.read_only(), None, 250_000,
- Arc::clone(&logger), &scorer, &random_seed_bytes) {
+ Arc::clone(&logger), &scorer, &(), &random_seed_bytes) {
assert_eq!(err, "Failed to find a sufficient route to the given destination");
} else { panic!(); }
}
// Now, attempt to route 250 sats (just a bit below the capacity).
// Our algorithm should provide us with these 3 paths.
let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None,
- 250_000, Arc::clone(&logger), &scorer, &random_seed_bytes).unwrap();
+ 250_000, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
assert_eq!(route.paths.len(), 3);
let mut total_amount_paid_msat = 0;
for path in &route.paths {
{
// Attempting to route an exact amount is also fine
let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None,
- 290_000, Arc::clone(&logger), &scorer, &random_seed_bytes).unwrap();
+ 290_000, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
assert_eq!(route.paths.len(), 3);
let mut total_amount_paid_msat = 0;
for path in &route.paths {
{
// Attempt to route more than available results in a failure.
if let Err(LightningError{err, action: ErrorAction::IgnoreError}) = get_route(
- &our_id, &payment_params, &network_graph.read_only(), None, 350_000, Arc::clone(&logger), &scorer, &random_seed_bytes) {
+ &our_id, &payment_params, &network_graph.read_only(), None, 350_000, Arc::clone(&logger), &scorer, &(), &random_seed_bytes) {
assert_eq!(err, "Failed to find a sufficient route to the given destination");
} else { panic!(); }
}
{
// Now, attempt to route 300 sats (exact amount we can route).
// Our algorithm should provide us with these 3 paths, 100 sats each.
- let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 300_000, Arc::clone(&logger), &scorer, &random_seed_bytes).unwrap();
+ let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 300_000, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
assert_eq!(route.paths.len(), 3);
let mut total_amount_paid_msat = 0;
{
// Now, attempt to route 180 sats.
// Our algorithm should provide us with these 2 paths.
- let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 180_000, Arc::clone(&logger), &scorer, &random_seed_bytes).unwrap();
+ let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 180_000, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
assert_eq!(route.paths.len(), 2);
let mut total_value_transferred_msat = 0;
{
// Attempt to route more than available results in a failure.
if let Err(LightningError{err, action: ErrorAction::IgnoreError}) = get_route(
- &our_id, &payment_params, &network_graph.read_only(), None, 210_000, Arc::clone(&logger), &scorer, &random_seed_bytes) {
+ &our_id, &payment_params, &network_graph.read_only(), None, 210_000, Arc::clone(&logger), &scorer, &(), &random_seed_bytes) {
assert_eq!(err, "Failed to find a sufficient route to the given destination");
} else { panic!(); }
}
{
// Now, attempt to route 200 sats (exact amount we can route).
- let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 200_000, Arc::clone(&logger), &scorer, &random_seed_bytes).unwrap();
+ let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 200_000, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
assert_eq!(route.paths.len(), 2);
let mut total_amount_paid_msat = 0;
// Get a route for 100 sats and check that we found the MPP route no problem and didn't
// overpay at all.
- let mut route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 100_000, Arc::clone(&logger), &scorer, &random_seed_bytes).unwrap();
+ let mut route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 100_000, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
assert_eq!(route.paths.len(), 2);
route.paths.sort_by_key(|path| path.hops[0].short_channel_id);
// Paths are manually ordered by SCID, so:
{
// Attempt to route more than available results in a failure.
if let Err(LightningError{err, action: ErrorAction::IgnoreError}) = get_route(
- &our_id, &payment_params, &network_graph.read_only(), None, 150_000, Arc::clone(&logger), &scorer, &random_seed_bytes) {
+ &our_id, &payment_params, &network_graph.read_only(), None, 150_000, Arc::clone(&logger), &scorer, &(), &random_seed_bytes) {
assert_eq!(err, "Failed to find a sufficient route to the given destination");
} else { panic!(); }
}
{
// Now, attempt to route 125 sats (just a bit below the capacity of 3 channels).
// Our algorithm should provide us with these 3 paths.
- let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 125_000, Arc::clone(&logger), &scorer, &random_seed_bytes).unwrap();
+ let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 125_000, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
assert_eq!(route.paths.len(), 3);
let mut total_amount_paid_msat = 0;
for path in &route.paths {
{
// Attempt to route without the last small cheap channel
- let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 90_000, Arc::clone(&logger), &scorer, &random_seed_bytes).unwrap();
+ let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 90_000, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
assert_eq!(route.paths.len(), 2);
let mut total_amount_paid_msat = 0;
for path in &route.paths {
{
// Now ensure the route flows simply over nodes 1 and 4 to 6.
- let route = get_route(&our_id, &payment_params, &network.read_only(), None, 10_000, Arc::clone(&logger), &scorer, &random_seed_bytes).unwrap();
+ let route = get_route(&our_id, &payment_params, &network.read_only(), None, 10_000, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
assert_eq!(route.paths.len(), 1);
assert_eq!(route.paths[0].hops.len(), 3);
{
// Now, attempt to route 90 sats, which is exactly 90 sats at the last hop, plus the
// 200% fee charged on channel 13 in the 1-to-2 direction.
- let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 90_000, Arc::clone(&logger), &scorer, &random_seed_bytes).unwrap();
+ let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 90_000, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
assert_eq!(route.paths.len(), 1);
assert_eq!(route.paths[0].hops.len(), 2);
// Now, attempt to route 90 sats, hitting the htlc_minimum on channel 4, but
// overshooting the htlc_maximum on channel 2. Thus, we should pick the (absurdly
// expensive) channels 12-13 path.
- let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 90_000, Arc::clone(&logger), &scorer, &random_seed_bytes).unwrap();
+ let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 90_000, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
assert_eq!(route.paths.len(), 1);
assert_eq!(route.paths[0].hops.len(), 2);
let route = get_route(&our_id, &payment_params, &network_graph.read_only(), Some(&[
&get_channel_details(Some(3), nodes[0], channelmanager::provided_init_features(&config), 200_000),
&get_channel_details(Some(2), nodes[0], channelmanager::provided_init_features(&config), 10_000),
- ]), 100_000, Arc::clone(&logger), &scorer, &random_seed_bytes).unwrap();
+ ]), 100_000, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
assert_eq!(route.paths.len(), 1);
assert_eq!(route.paths[0].hops.len(), 1);
let route = get_route(&our_id, &payment_params, &network_graph.read_only(), Some(&[
&get_channel_details(Some(3), nodes[0], channelmanager::provided_init_features(&config), 50_000),
&get_channel_details(Some(2), nodes[0], channelmanager::provided_init_features(&config), 50_000),
- ]), 100_000, Arc::clone(&logger), &scorer, &random_seed_bytes).unwrap();
+ ]), 100_000, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
assert_eq!(route.paths.len(), 2);
assert_eq!(route.paths[0].hops.len(), 1);
assert_eq!(route.paths[1].hops.len(), 1);
&get_channel_details(Some(8), nodes[0], channelmanager::provided_init_features(&config), 50_000),
&get_channel_details(Some(9), nodes[0], channelmanager::provided_init_features(&config), 50_000),
&get_channel_details(Some(4), nodes[0], channelmanager::provided_init_features(&config), 1_000_000),
- ]), 100_000, Arc::clone(&logger), &scorer, &random_seed_bytes).unwrap();
+ ]), 100_000, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
assert_eq!(route.paths.len(), 1);
assert_eq!(route.paths[0].hops.len(), 1);
let random_seed_bytes = keys_manager.get_secure_random_bytes();
let route = get_route(
&our_id, &payment_params, &network_graph.read_only(), None, 100,
- Arc::clone(&logger), &scorer, &random_seed_bytes
+ Arc::clone(&logger), &scorer, &(), &random_seed_bytes
).unwrap();
let path = route.paths[0].hops.iter().map(|hop| hop.short_channel_id).collect::<Vec<_>>();
let scorer = FixedPenaltyScorer::with_penalty(100);
let route = get_route(
&our_id, &payment_params, &network_graph.read_only(), None, 100,
- Arc::clone(&logger), &scorer, &random_seed_bytes
+ Arc::clone(&logger), &scorer, &(), &random_seed_bytes
).unwrap();
let path = route.paths[0].hops.iter().map(|hop| hop.short_channel_id).collect::<Vec<_>>();
fn write<W: Writer>(&self, _w: &mut W) -> Result<(), crate::io::Error> { unimplemented!() }
}
impl Score for BadChannelScorer {
- fn channel_penalty_msat(&self, short_channel_id: u64, _: &NodeId, _: &NodeId, _: ChannelUsage) -> u64 {
+ type ScoreParams = ();
+ fn channel_penalty_msat(&self, short_channel_id: u64, _: &NodeId, _: &NodeId, _: ChannelUsage, _score_params: &Self::ScoreParams) -> u64 {
if short_channel_id == self.short_channel_id { u64::max_value() } else { 0 }
}
}
impl Score for BadNodeScorer {
- fn channel_penalty_msat(&self, _: u64, _: &NodeId, target: &NodeId, _: ChannelUsage) -> u64 {
+ type ScoreParams = ();
+ fn channel_penalty_msat(&self, _: u64, _: &NodeId, target: &NodeId, _: ChannelUsage, _score_params: &Self::ScoreParams) -> u64 {
if *target == self.node_id { u64::max_value() } else { 0 }
}
let random_seed_bytes = keys_manager.get_secure_random_bytes();
let route = get_route(
&our_id, &payment_params, &network_graph, None, 100,
- Arc::clone(&logger), &scorer, &random_seed_bytes
+ Arc::clone(&logger), &scorer, &(), &random_seed_bytes
).unwrap();
let path = route.paths[0].hops.iter().map(|hop| hop.short_channel_id).collect::<Vec<_>>();
let scorer = BadChannelScorer { short_channel_id: 6 };
let route = get_route(
&our_id, &payment_params, &network_graph, None, 100,
- Arc::clone(&logger), &scorer, &random_seed_bytes
+ Arc::clone(&logger), &scorer, &(), &random_seed_bytes
).unwrap();
let path = route.paths[0].hops.iter().map(|hop| hop.short_channel_id).collect::<Vec<_>>();
let scorer = BadNodeScorer { node_id: NodeId::from_pubkey(&nodes[2]) };
match get_route(
&our_id, &payment_params, &network_graph, None, 100,
- Arc::clone(&logger), &scorer, &random_seed_bytes
+ Arc::clone(&logger), &scorer, &(), &random_seed_bytes
) {
Err(LightningError { err, .. } ) => {
assert_eq!(err, "Failed to find a path to the given destination");
.with_max_total_cltv_expiry_delta(feasible_max_total_cltv_delta);
let keys_manager = ln_test_utils::TestKeysInterface::new(&[0u8; 32], Network::Testnet);
let random_seed_bytes = keys_manager.get_secure_random_bytes();
- let route = get_route(&our_id, &feasible_payment_params, &network_graph, None, 100, Arc::clone(&logger), &scorer, &random_seed_bytes).unwrap();
+ let route = get_route(&our_id, &feasible_payment_params, &network_graph, None, 100, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
let path = route.paths[0].hops.iter().map(|hop| hop.short_channel_id).collect::<Vec<_>>();
assert_ne!(path.len(), 0);
let fail_max_total_cltv_delta = 23;
let fail_payment_params = PaymentParameters::from_node_id(nodes[6], 0).with_route_hints(last_hops(&nodes)).unwrap()
.with_max_total_cltv_expiry_delta(fail_max_total_cltv_delta);
- match get_route(&our_id, &fail_payment_params, &network_graph, None, 100, Arc::clone(&logger), &scorer, &random_seed_bytes)
+ match get_route(&our_id, &fail_payment_params, &network_graph, None, 100, Arc::clone(&logger), &scorer, &(), &random_seed_bytes)
{
Err(LightningError { err, .. } ) => {
assert_eq!(err, "Failed to find a path to the given destination");
// We should be able to find a route initially, but after we mark a few random
// channels as failed we eventually won't be able to any longer.
- assert!(get_route(&our_id, &payment_params, &network_graph, None, 100, Arc::clone(&logger), &scorer, &random_seed_bytes).is_ok());
+ assert!(get_route(&our_id, &payment_params, &network_graph, None, 100, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).is_ok());
loop {
- if let Ok(route) = get_route(&our_id, &payment_params, &network_graph, None, 100, Arc::clone(&logger), &scorer, &random_seed_bytes) {
+ if let Ok(route) = get_route(&our_id, &payment_params, &network_graph, None, 100, Arc::clone(&logger), &scorer, &(), &random_seed_bytes) {
for chan in route.paths[0].hops.iter() {
assert!(!payment_params.previously_failed_channels.contains(&chan.short_channel_id));
}
// First check we can actually create a long route on this graph.
let feasible_payment_params = PaymentParameters::from_node_id(nodes[18], 0);
let route = get_route(&our_id, &feasible_payment_params, &network_graph, None, 100,
- Arc::clone(&logger), &scorer, &random_seed_bytes).unwrap();
+ Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
let path = route.paths[0].hops.iter().map(|hop| hop.short_channel_id).collect::<Vec<_>>();
assert!(path.len() == MAX_PATH_LENGTH_ESTIMATE.into());
// But we can't create a path surpassing the MAX_PATH_LENGTH_ESTIMATE limit.
let fail_payment_params = PaymentParameters::from_node_id(nodes[19], 0);
match get_route(&our_id, &fail_payment_params, &network_graph, None, 100,
- Arc::clone(&logger), &scorer, &random_seed_bytes)
+ Arc::clone(&logger), &scorer, &(), &random_seed_bytes)
{
Err(LightningError { err, .. } ) => {
assert_eq!(err, "Failed to find a path to the given destination");
let payment_params = PaymentParameters::from_node_id(nodes[6], 42).with_route_hints(last_hops(&nodes)).unwrap();
let keys_manager = ln_test_utils::TestKeysInterface::new(&[0u8; 32], Network::Testnet);
let random_seed_bytes = keys_manager.get_secure_random_bytes();
- let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 100, Arc::clone(&logger), &scorer, &random_seed_bytes).unwrap();
+ let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 100, Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
assert_eq!(route.paths.len(), 1);
let cltv_expiry_deltas_before = route.paths[0].hops.iter().map(|h| h.cltv_expiry_delta).collect::<Vec<u32>>();
let random_seed_bytes = keys_manager.get_secure_random_bytes();
let mut route = get_route(&our_id, &payment_params, &network_graph, None, 100,
- Arc::clone(&logger), &scorer, &random_seed_bytes).unwrap();
+ Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
add_random_cltv_offset(&mut route, &payment_params, &network_graph, &random_seed_bytes);
let mut path_plausibility = vec![];
fn avoids_saturating_channels() {
let (secp_ctx, network_graph, gossip_sync, _, logger) = build_graph();
let (_, our_id, privkeys, nodes) = get_nodes(&secp_ctx);
-
- let scorer = ProbabilisticScorer::new(Default::default(), &*network_graph, Arc::clone(&logger));
+ let decay_params = ProbabilisticScoringDecayParameters::default();
+ let scorer = ProbabilisticScorer::new(decay_params, &*network_graph, Arc::clone(&logger));
// Set the fee on channel 13 to 100% to match channel 4 giving us two equivalent paths (us
// -> node 7 -> node 2 and us -> node 1 -> node 2) which we should balance over.
let keys_manager = ln_test_utils::TestKeysInterface::new(&[0u8; 32], Network::Testnet);
let random_seed_bytes = keys_manager.get_secure_random_bytes();
// 100,000 sats is less than the available liquidity on each channel, set above.
- let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 100_000_000, Arc::clone(&logger), &scorer, &random_seed_bytes).unwrap();
+ let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 100_000_000, Arc::clone(&logger), &scorer, &ProbabilisticScoringFeeParameters::default(), &random_seed_bytes).unwrap();
assert_eq!(route.paths.len(), 2);
assert!((route.paths[0].hops[1].short_channel_id == 4 && route.paths[1].hops[1].short_channel_id == 13) ||
(route.paths[1].hops[1].short_channel_id == 4 && route.paths[0].hops[1].short_channel_id == 13));
println!("Using seed of {}", seed);
seed
}
- #[cfg(not(feature = "no-std"))]
- use crate::util::ser::ReadableArgs;
#[test]
#[cfg(not(feature = "no-std"))]
fn generate_routes() {
- use crate::routing::scoring::{ProbabilisticScorer, ProbabilisticScoringParameters};
+ use crate::routing::scoring::{ProbabilisticScorer, ProbabilisticScoringFeeParameters};
- let mut d = match super::bench_utils::get_route_file() {
+ let logger = ln_test_utils::TestLogger::new();
+ let graph = match super::bench_utils::read_network_graph(&logger) {
Ok(f) => f,
Err(e) => {
eprintln!("{}", e);
return;
},
};
- let logger = ln_test_utils::TestLogger::new();
- let graph = NetworkGraph::read(&mut d, &logger).unwrap();
- let keys_manager = ln_test_utils::TestKeysInterface::new(&[0u8; 32], Network::Testnet);
- let random_seed_bytes = keys_manager.get_secure_random_bytes();
- // First, get 100 (source, destination) pairs for which route-getting actually succeeds...
- let mut seed = random_init_seed() as usize;
- let nodes = graph.read_only().nodes().clone();
- 'load_endpoints: for _ in 0..10 {
- loop {
- seed = seed.overflowing_mul(0xdeadbeef).0;
- let src = &PublicKey::from_slice(nodes.unordered_keys().skip(seed % nodes.len()).next().unwrap().as_slice()).unwrap();
- seed = seed.overflowing_mul(0xdeadbeef).0;
- let dst = PublicKey::from_slice(nodes.unordered_keys().skip(seed % nodes.len()).next().unwrap().as_slice()).unwrap();
- let payment_params = PaymentParameters::from_node_id(dst, 42);
- let amt = seed as u64 % 200_000_000;
- let params = ProbabilisticScoringParameters::default();
- let scorer = ProbabilisticScorer::new(params, &graph, &logger);
- if get_route(src, &payment_params, &graph.read_only(), None, amt, &logger, &scorer, &random_seed_bytes).is_ok() {
- continue 'load_endpoints;
- }
- }
- }
+ let params = ProbabilisticScoringFeeParameters::default();
+ let mut scorer = ProbabilisticScorer::new(ProbabilisticScoringDecayParameters::default(), &graph, &logger);
+ let features = super::InvoiceFeatures::empty();
+
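+ // Smoke-test route generation: build two (source, destination) routes from a random seed.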
+ super::bench_utils::generate_test_routes(&graph, &mut scorer, &params, features, random_init_seed(), 0, 2);
}
#[test]
#[cfg(not(feature = "no-std"))]
fn generate_routes_mpp() {
- use crate::routing::scoring::{ProbabilisticScorer, ProbabilisticScoringParameters};
+ use crate::routing::scoring::{ProbabilisticScorer, ProbabilisticScoringFeeParameters};
- let mut d = match super::bench_utils::get_route_file() {
+ let logger = ln_test_utils::TestLogger::new();
+ let graph = match super::bench_utils::read_network_graph(&logger) {
Ok(f) => f,
Err(e) => {
eprintln!("{}", e);
return;
},
};
+
+ let params = ProbabilisticScoringFeeParameters::default();
+ let mut scorer = ProbabilisticScorer::new(ProbabilisticScoringDecayParameters::default(), &graph, &logger);
+ let features = channelmanager::provided_invoice_features(&UserConfig::default());
+
+ super::bench_utils::generate_test_routes(&graph, &mut scorer, &params, features, random_init_seed(), 0, 2);
+ }
+
+ #[test]
+ #[cfg(not(feature = "no-std"))]
+ fn generate_large_mpp_routes() {
+ use crate::routing::scoring::{ProbabilisticScorer, ProbabilisticScoringFeeParameters};
+
let logger = ln_test_utils::TestLogger::new();
- let graph = NetworkGraph::read(&mut d, &logger).unwrap();
- let keys_manager = ln_test_utils::TestKeysInterface::new(&[0u8; 32], Network::Testnet);
- let random_seed_bytes = keys_manager.get_secure_random_bytes();
- let config = UserConfig::default();
+ let graph = match super::bench_utils::read_network_graph(&logger) {
+ Ok(f) => f,
+ Err(e) => {
+ eprintln!("{}", e);
+ return;
+ },
+ };
- // First, get 100 (source, destination) pairs for which route-getting actually succeeds...
- let mut seed = random_init_seed() as usize;
- let nodes = graph.read_only().nodes().clone();
- 'load_endpoints: for _ in 0..10 {
- loop {
- seed = seed.overflowing_mul(0xdeadbeef).0;
- let src = &PublicKey::from_slice(nodes.unordered_keys().skip(seed % nodes.len()).next().unwrap().as_slice()).unwrap();
- seed = seed.overflowing_mul(0xdeadbeef).0;
- let dst = PublicKey::from_slice(nodes.unordered_keys().skip(seed % nodes.len()).next().unwrap().as_slice()).unwrap();
- let payment_params = PaymentParameters::from_node_id(dst, 42).with_bolt11_features(channelmanager::provided_invoice_features(&config)).unwrap();
- let amt = seed as u64 % 200_000_000;
- let params = ProbabilisticScoringParameters::default();
- let scorer = ProbabilisticScorer::new(params, &graph, &logger);
- if get_route(src, &payment_params, &graph.read_only(), None, amt, &logger, &scorer, &random_seed_bytes).is_ok() {
- continue 'load_endpoints;
- }
- }
- }
+ let params = ProbabilisticScoringFeeParameters::default();
+ let mut scorer = ProbabilisticScorer::new(ProbabilisticScoringDecayParameters::default(), &graph, &logger);
+ let features = channelmanager::provided_invoice_features(&UserConfig::default());
+
+ super::bench_utils::generate_test_routes(&graph, &mut scorer, &params, features, random_init_seed(), 1_000_000, 2);
}
#[test]
let keys_manager = ln_test_utils::TestKeysInterface::new(&[0u8; 32], Network::Testnet);
let random_seed_bytes = keys_manager.get_secure_random_bytes();
- let scorer_params = ProbabilisticScoringParameters::default();
- let mut scorer = ProbabilisticScorer::new(scorer_params, Arc::clone(&network_graph), Arc::clone(&logger));
+ let mut scorer_params = ProbabilisticScoringFeeParameters::default();
+ let scorer = ProbabilisticScorer::new(ProbabilisticScoringDecayParameters::default(), Arc::clone(&network_graph), Arc::clone(&logger));
// First, check that manually-set penalties are returned by the scorer.
let usage = ChannelUsage {
inflight_htlc_msat: 0,
effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024_000, htlc_maximum_msat: 1_000 },
};
- scorer.set_manual_penalty(&NodeId::from_pubkey(&nodes[3]), 123);
- scorer.set_manual_penalty(&NodeId::from_pubkey(&nodes[4]), 456);
- assert_eq!(scorer.channel_penalty_msat(42, &NodeId::from_pubkey(&nodes[3]), &NodeId::from_pubkey(&nodes[4]), usage), 456);
+ scorer_params.set_manual_penalty(&NodeId::from_pubkey(&nodes[3]), 123);
+ scorer_params.set_manual_penalty(&NodeId::from_pubkey(&nodes[4]), 456);
+ assert_eq!(scorer.channel_penalty_msat(42, &NodeId::from_pubkey(&nodes[3]), &NodeId::from_pubkey(&nodes[4]), usage, &scorer_params), 456);
// Then check we can get a normal route
let payment_params = PaymentParameters::from_node_id(nodes[10], 42);
- let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 100, Arc::clone(&logger), &scorer, &random_seed_bytes);
+ let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 100, Arc::clone(&logger), &scorer, &scorer_params, &random_seed_bytes);
assert!(route.is_ok());
// Then check that we can't get a route if we ban an intermediate node.
- scorer.add_banned(&NodeId::from_pubkey(&nodes[3]));
- let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 100, Arc::clone(&logger), &scorer, &random_seed_bytes);
+ scorer_params.add_banned(&NodeId::from_pubkey(&nodes[3]));
+ let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 100, Arc::clone(&logger), &scorer, &scorer_params, &random_seed_bytes);
assert!(route.is_err());
// Finally, make sure we can route again once we remove the ban.
- scorer.remove_banned(&NodeId::from_pubkey(&nodes[3]));
- let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 100, Arc::clone(&logger), &scorer, &random_seed_bytes);
+ scorer_params.remove_banned(&NodeId::from_pubkey(&nodes[3]));
+ let route = get_route(&our_id, &payment_params, &network_graph.read_only(), None, 100, Arc::clone(&logger), &scorer, &scorer_params, &random_seed_bytes);
assert!(route.is_ok());
}
}
}
-#[cfg(all(test, not(feature = "no-std")))]
+#[cfg(all(any(test, ldk_bench), not(feature = "no-std")))]
pub(crate) mod bench_utils {
+ use super::*;
use std::fs::File;
+
+ use bitcoin::hashes::Hash;
+ use bitcoin::secp256k1::{PublicKey, Secp256k1, SecretKey};
+
+ use crate::chain::transaction::OutPoint;
+ use crate::sign::{EntropySource, KeysManager};
+ use crate::ln::channelmanager::{self, ChannelCounterparty, ChannelDetails};
+ use crate::ln::features::InvoiceFeatures;
+ use crate::routing::gossip::NetworkGraph;
+ use crate::util::config::UserConfig;
+ use crate::util::ser::ReadableArgs;
+ use crate::util::test_utils::TestLogger;
+
/// Tries to open a network graph file, or panics with a URL to fetch it.
pub(crate) fn get_route_file() -> Result<std::fs::File, &'static str> {
let res = File::open("net_graph-2023-01-18.bin") // By default we're run in RL/lightning
path.pop(); // target
path.push("lightning");
path.push("net_graph-2023-01-18.bin");
- eprintln!("{}", path.to_str().unwrap());
+ File::open(path)
+ })
+ .or_else(|_| { // Fall back to guessing based on the binary location for a subcrate
+ // path is likely something like .../rust-lightning/bench/target/debug/deps/bench..
+ let mut path = std::env::current_exe().unwrap();
+ path.pop(); // bench...
+ path.pop(); // deps
+ path.pop(); // debug
+ path.pop(); // target
+ path.pop(); // bench
+ path.push("lightning");
+ path.push("net_graph-2023-01-18.bin");
File::open(path)
})
.map_err(|_| "Please fetch https://bitcoin.ninja/ldk-net_graph-v0.0.113-2023-01-18.bin and place it at lightning/net_graph-2023-01-18.bin");
#[cfg(not(require_route_graph_test))]
return res;
}
-}
-#[cfg(all(test, feature = "_bench_unstable", not(feature = "no-std")))]
-mod benches {
- use super::*;
- use bitcoin::hashes::Hash;
- use bitcoin::secp256k1::{PublicKey, Secp256k1, SecretKey};
- use crate::chain::transaction::OutPoint;
- use crate::sign::{EntropySource, KeysManager};
- use crate::ln::channelmanager::{self, ChannelCounterparty, ChannelDetails};
- use crate::ln::features::InvoiceFeatures;
- use crate::routing::gossip::NetworkGraph;
- use crate::routing::scoring::{FixedPenaltyScorer, ProbabilisticScorer, ProbabilisticScoringParameters};
- use crate::util::config::UserConfig;
- use crate::util::logger::{Logger, Record};
- use crate::util::ser::ReadableArgs;
-
- use test::Bencher;
-
- struct DummyLogger {}
- impl Logger for DummyLogger {
- fn log(&self, _record: &Record) {}
- }
-
- fn read_network_graph(logger: &DummyLogger) -> NetworkGraph<&DummyLogger> {
- let mut d = bench_utils::get_route_file().unwrap();
- NetworkGraph::read(&mut d, logger).unwrap()
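+ /// Reads the test/bench network graph from disk, erroring if the file isn't available.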
+ pub(crate) fn read_network_graph(logger: &TestLogger) -> Result<NetworkGraph<&TestLogger>, &'static str> {
+ get_route_file().map(|mut f| NetworkGraph::read(&mut f, logger).unwrap())
}
- fn payer_pubkey() -> PublicKey {
+ pub(crate) fn payer_pubkey() -> PublicKey {
let secp_ctx = Secp256k1::new();
PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap())
}
#[inline]
- fn first_hop(node_id: PublicKey) -> ChannelDetails {
+ pub(crate) fn first_hop(node_id: PublicKey) -> ChannelDetails {
ChannelDetails {
channel_id: [0; 32],
counterparty: ChannelCounterparty {
short_channel_id: Some(1),
inbound_scid_alias: None,
outbound_scid_alias: None,
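+ // Give the synthetic first hop ample capacity so it shouldn't constrain routes, even in the large-MPP benchmarks.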
- channel_value_satoshis: 10_000_000,
+ channel_value_satoshis: 10_000_000_000,
user_channel_id: 0,
- balance_msat: 10_000_000,
- outbound_capacity_msat: 10_000_000,
- next_outbound_htlc_limit_msat: 10_000_000,
+ balance_msat: 10_000_000_000,
+ outbound_capacity_msat: 10_000_000_000,
+ next_outbound_htlc_limit_msat: 10_000_000_000,
inbound_capacity_msat: 0,
unspendable_punishment_reserve: None,
confirmations_required: None,
}
}
- #[bench]
- fn generate_routes_with_zero_penalty_scorer(bench: &mut Bencher) {
- let logger = DummyLogger {};
- let network_graph = read_network_graph(&logger);
- let scorer = FixedPenaltyScorer::with_penalty(0);
- generate_routes(bench, &network_graph, scorer, InvoiceFeatures::empty());
- }
-
- #[bench]
- fn generate_mpp_routes_with_zero_penalty_scorer(bench: &mut Bencher) {
- let logger = DummyLogger {};
- let network_graph = read_network_graph(&logger);
- let scorer = FixedPenaltyScorer::with_penalty(0);
- generate_routes(bench, &network_graph, scorer, channelmanager::provided_invoice_features(&UserConfig::default()));
- }
-
- #[bench]
- fn generate_routes_with_probabilistic_scorer(bench: &mut Bencher) {
- let logger = DummyLogger {};
- let network_graph = read_network_graph(&logger);
- let params = ProbabilisticScoringParameters::default();
- let scorer = ProbabilisticScorer::new(params, &network_graph, &logger);
- generate_routes(bench, &network_graph, scorer, InvoiceFeatures::empty());
- }
-
- #[bench]
- fn generate_mpp_routes_with_probabilistic_scorer(bench: &mut Bencher) {
- let logger = DummyLogger {};
- let network_graph = read_network_graph(&logger);
- let params = ProbabilisticScoringParameters::default();
- let scorer = ProbabilisticScorer::new(params, &network_graph, &logger);
- generate_routes(bench, &network_graph, scorer, channelmanager::provided_invoice_features(&UserConfig::default()));
- }
-
- fn generate_routes<S: Score>(
- bench: &mut Bencher, graph: &NetworkGraph<&DummyLogger>, mut scorer: S,
- features: InvoiceFeatures
- ) {
- let nodes = graph.read_only().nodes().clone();
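+ /// Picks `route_count` random (first-hop, payment-parameters, amount) triples for which
+ /// routing succeeds, seeding `scorer` with pseudo-random success/failure data on the way.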
+ pub(crate) fn generate_test_routes<S: Score>(graph: &NetworkGraph<&TestLogger>, scorer: &mut S,
+ score_params: &S::ScoreParams, features: InvoiceFeatures, mut seed: u64,
+ starting_amount: u64, route_count: usize,
+ ) -> Vec<(ChannelDetails, PaymentParameters, u64)> {
let payer = payer_pubkey();
let keys_manager = KeysManager::new(&[0u8; 32], 42, 42);
let random_seed_bytes = keys_manager.get_secure_random_bytes();
- // First, get 100 (source, destination) pairs for which route-getting actually succeeds...
- let mut routes = Vec::new();
+ let nodes = graph.read_only().nodes().clone();
let mut route_endpoints = Vec::new();
- let mut seed: usize = 0xdeadbeef;
- 'load_endpoints: for _ in 0..150 {
+ // Fetch 1.5x more routes than we need: after the scorer updates below, some of the
+ // routes we picked may no longer be routable.
+ for _ in 0..route_count * 3 / 2 {
loop {
- seed *= 0xdeadbeef;
- let src = PublicKey::from_slice(nodes.unordered_keys().skip(seed % nodes.len()).next().unwrap().as_slice()).unwrap();
- seed *= 0xdeadbeef;
- let dst = PublicKey::from_slice(nodes.unordered_keys().skip(seed % nodes.len()).next().unwrap().as_slice()).unwrap();
- let params = PaymentParameters::from_node_id(dst, 42).with_bolt11_features(features.clone()).unwrap();
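+ // Advance a simple 64-bit LCG (Knuth's MMIX multiplier) to pick pseudo-random endpoints.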
+ seed = seed.overflowing_mul(6364136223846793005).0.overflowing_add(1).0;
+ let src = PublicKey::from_slice(nodes.unordered_keys()
+ .skip((seed as usize) % nodes.len()).next().unwrap().as_slice()).unwrap();
+ seed = seed.overflowing_mul(6364136223846793005).0.overflowing_add(1).0;
+ let dst = PublicKey::from_slice(nodes.unordered_keys()
+ .skip((seed as usize) % nodes.len()).next().unwrap().as_slice()).unwrap();
+ let params = PaymentParameters::from_node_id(dst, 42)
+ .with_bolt11_features(features.clone()).unwrap();
let first_hop = first_hop(src);
- let amt = seed as u64 % 1_000_000;
- if let Ok(route) = get_route(&payer, &params, &graph.read_only(), Some(&[&first_hop]), amt, &DummyLogger{}, &scorer, &random_seed_bytes) {
- routes.push(route);
- route_endpoints.push((first_hop, params, amt));
- continue 'load_endpoints;
- }
- }
- }
+ let amt = starting_amount + seed % 1_000_000;
+ let path_exists =
+ get_route(&payer, &params, &graph.read_only(), Some(&[&first_hop]),
+ amt, &TestLogger::new(), &scorer, score_params, &random_seed_bytes).is_ok();
+ if path_exists {
+ // ...and seed the scorer with success and failure data...
+ seed = seed.overflowing_mul(6364136223846793005).0.overflowing_add(1).0;
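+ // Start from a large pseudo-random amount, shrinking it below until a route exists.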
+ let mut score_amt = seed % 1_000_000_000;
+ loop {
+ // Generate fail/success paths for a wider range of potential amounts with
+ // MPP enabled to give us a chance to apply penalties for more potential
+ // routes.
+ let mpp_features = channelmanager::provided_invoice_features(&UserConfig::default());
+ let params = PaymentParameters::from_node_id(dst, 42)
+ .with_bolt11_features(mpp_features).unwrap();
+
+ let route_res = get_route(&payer, &params, &graph.read_only(),
+ Some(&[&first_hop]), score_amt, &TestLogger::new(), &scorer,
+ score_params, &random_seed_bytes);
+ if let Ok(route) = route_res {
+ for path in route.paths {
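+ // Use a bit of the seed to mark roughly half the paths successful and half failed.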
+ if seed & 0x80 == 0 {
+ scorer.payment_path_successful(&path);
+ } else {
+ let short_channel_id = path.hops[path.hops.len() / 2].short_channel_id;
+ scorer.payment_path_failed(&path, short_channel_id);
+ }
+ seed = seed.overflowing_mul(6364136223846793005).0.overflowing_add(1).0;
+ }
+ break;
+ }
+ // If we couldn't find a path with a higher amount, reduce it and try again.
+ score_amt /= 100;
+ }
- // ...and seed the scorer with success and failure data...
- for route in routes {
- let amount = route.get_total_amount();
- if amount < 250_000 {
- for path in route.paths {
- scorer.payment_path_successful(&path);
- }
- } else if amount > 750_000 {
- for path in route.paths {
- let short_channel_id = path.hops[path.hops.len() / 2].short_channel_id;
- scorer.payment_path_failed(&path, short_channel_id);
+ route_endpoints.push((first_hop, params, amt));
+ break;
}
}
}
- // Because we've changed channel scores, its possible we'll take different routes to the
+ // Because we've changed channel scores, it's possible we'll take different routes to the
// selected destinations, possibly causing us to fail because, eg, the newly-selected path
// requires a too-high CLTV delta.
route_endpoints.retain(|(first_hop, params, amt)| {
- get_route(&payer, params, &graph.read_only(), Some(&[first_hop]), *amt, &DummyLogger{}, &scorer, &random_seed_bytes).is_ok()
+ get_route(&payer, params, &graph.read_only(), Some(&[first_hop]), *amt,
+ &TestLogger::new(), &scorer, score_params, &random_seed_bytes).is_ok()
});
- route_endpoints.truncate(100);
- assert_eq!(route_endpoints.len(), 100);
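+ // Trim to the requested count; the assert below catches cases where the 1.5x oversampling above still left too few routable endpoints.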
+ route_endpoints.truncate(route_count);
+ assert_eq!(route_endpoints.len(), route_count);
+ route_endpoints
+ }
+}
+
+#[cfg(ldk_bench)]
+pub mod benches {
+ use super::*;
+ use crate::sign::{EntropySource, KeysManager};
+ use crate::ln::channelmanager;
+ use crate::ln::features::InvoiceFeatures;
+ use crate::routing::gossip::NetworkGraph;
+ use crate::routing::scoring::{FixedPenaltyScorer, ProbabilisticScorer, ProbabilisticScoringFeeParameters, ProbabilisticScoringDecayParameters};
+ use crate::util::config::UserConfig;
+ use crate::util::logger::{Logger, Record};
+ use crate::util::test_utils::TestLogger;
+
+ use criterion::Criterion;
+
+ struct DummyLogger {}
+ impl Logger for DummyLogger {
+ fn log(&self, _record: &Record) {}
+ }
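+
+ // `TestLogger` is used while loading the graph and seeding the scorer, but the
+ // hot loops below pass the no-op `DummyLogger` so that logging overhead does
+ // not skew the measured routing time.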
+
+ pub fn generate_routes_with_zero_penalty_scorer(bench: &mut Criterion) {
+ let logger = TestLogger::new();
+ let network_graph = bench_utils::read_network_graph(&logger).unwrap();
+ let scorer = FixedPenaltyScorer::with_penalty(0);
+ generate_routes(bench, &network_graph, scorer, &(), InvoiceFeatures::empty(), 0,
+ "generate_routes_with_zero_penalty_scorer");
+ }
+
+ pub fn generate_mpp_routes_with_zero_penalty_scorer(bench: &mut Criterion) {
+ let logger = TestLogger::new();
+ let network_graph = bench_utils::read_network_graph(&logger).unwrap();
+ let scorer = FixedPenaltyScorer::with_penalty(0);
+ generate_routes(bench, &network_graph, scorer, &(),
+ channelmanager::provided_invoice_features(&UserConfig::default()), 0,
+ "generate_mpp_routes_with_zero_penalty_scorer");
+ }
+
+ pub fn generate_routes_with_probabilistic_scorer(bench: &mut Criterion) {
+ let logger = TestLogger::new();
+ let network_graph = bench_utils::read_network_graph(&logger).unwrap();
+ let params = ProbabilisticScoringFeeParameters::default();
+ let scorer = ProbabilisticScorer::new(ProbabilisticScoringDecayParameters::default(), &network_graph, &logger);
+ generate_routes(bench, &network_graph, scorer, ¶ms, InvoiceFeatures::empty(), 0,
+ "generate_routes_with_probabilistic_scorer");
+ }
+
+ pub fn generate_mpp_routes_with_probabilistic_scorer(bench: &mut Criterion) {
+ let logger = TestLogger::new();
+ let network_graph = bench_utils::read_network_graph(&logger).unwrap();
+ let params = ProbabilisticScoringFeeParameters::default();
+ let scorer = ProbabilisticScorer::new(ProbabilisticScoringDecayParameters::default(), &network_graph, &logger);
+ generate_routes(bench, &network_graph, scorer, ¶ms,
+ channelmanager::provided_invoice_features(&UserConfig::default()), 0,
+ "generate_mpp_routes_with_probabilistic_scorer");
+ }
+
+ pub fn generate_large_mpp_routes_with_probabilistic_scorer(bench: &mut Criterion) {
+ let logger = TestLogger::new();
+ let network_graph = bench_utils::read_network_graph(&logger).unwrap();
+ let params = ProbabilisticScoringFeeParameters::default();
+ let scorer = ProbabilisticScorer::new(ProbabilisticScoringDecayParameters::default(), &network_graph, &logger);
+ generate_routes(bench, &network_graph, scorer, ¶ms,
+ channelmanager::provided_invoice_features(&UserConfig::default()), 100_000_000,
+ "generate_large_mpp_routes_with_probabilistic_scorer");
+ }
+
+ fn generate_routes<S: Score>(
+ bench: &mut Criterion, graph: &NetworkGraph<&TestLogger>, mut scorer: S,
+ score_params: &S::ScoreParams, features: InvoiceFeatures, starting_amount: u64,
+ bench_name: &'static str,
+ ) {
+ let payer = bench_utils::payer_pubkey();
+ let keys_manager = KeysManager::new(&[0u8; 32], 42, 42);
+ let random_seed_bytes = keys_manager.get_secure_random_bytes();
+
+ // First, get 50 (source, destination) pairs for which route-getting actually succeeds...
+ let route_endpoints = bench_utils::generate_test_routes(graph, &mut scorer, score_params, features, 0xdeadbeef, starting_amount, 50);
// ...then benchmark finding paths between the nodes we learned.
let mut idx = 0;
- bench.iter(|| {
+ bench.bench_function(bench_name, |b| b.iter(|| {
let (first_hop, params, amt) = &route_endpoints[idx % route_endpoints.len()];
- assert!(get_route(&payer, params, &graph.read_only(), Some(&[first_hop]), *amt, &DummyLogger{}, &scorer, &random_seed_bytes).is_ok());
+ assert!(get_route(&payer, params, &graph.read_only(), Some(&[first_hop]), *amt,
+ &DummyLogger{}, &scorer, score_params, &random_seed_bytes).is_ok());
idx += 1;
- });
+ }));
}
}
//! #
//! # use lightning::routing::gossip::NetworkGraph;
//! # use lightning::routing::router::{RouteParameters, find_route};
-//! # use lightning::routing::scoring::{ProbabilisticScorer, ProbabilisticScoringParameters};
+//! # use lightning::routing::scoring::{ProbabilisticScorer, ProbabilisticScoringFeeParameters, ProbabilisticScoringDecayParameters};
//! # use lightning::sign::KeysManager;
//! # use lightning::util::logger::{Logger, Record};
//! # use bitcoin::secp256k1::PublicKey;
//! # let logger = FakeLogger {};
//! #
//! // Use the default channel penalties.
-//! let params = ProbabilisticScoringParameters::default();
-//! let scorer = ProbabilisticScorer::new(params, &network_graph, &logger);
+//! let params = ProbabilisticScoringFeeParameters::default();
+//! let decay_params = ProbabilisticScoringDecayParameters::default();
+//! let scorer = ProbabilisticScorer::new(decay_params, &network_graph, &logger);
//!
//! // Or use custom channel penalties.
-//! let params = ProbabilisticScoringParameters {
-//! liquidity_penalty_multiplier_msat: 2 * 1000,
-//! ..ProbabilisticScoringParameters::default()
+//! let params = ProbabilisticScoringFeeParameters {
+//! liquidity_penalty_multiplier_msat: 2 * 1000,
+//! ..ProbabilisticScoringFeeParameters::default()
//! };
-//! let scorer = ProbabilisticScorer::new(params, &network_graph, &logger);
+//! let decay_params = ProbabilisticScoringDecayParameters::default();
+//! let scorer = ProbabilisticScorer::new(decay_params, &network_graph, &logger);
//! # let random_seed_bytes = [42u8; 32];
//!
-//! let route = find_route(&payer, &route_params, &network_graph, None, &logger, &scorer, &random_seed_bytes);
+//! let route = find_route(&payer, &route_params, &network_graph, None, &logger, &scorer, ¶ms, &random_seed_bytes);
//! # }
//! ```
//!
///
/// Scoring is in terms of fees willing to be paid in order to avoid routing through a channel.
pub trait Score $(: $supertrait)* {
+ /// A configurable type carrying the parameters passed, on a per-routefinding-call
+ /// basis, through to the scorer methods, which use them to determine each
+ /// channel's suitability for inclusion in a route.
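+ ///
+ /// For illustration only (a hedged sketch, not an API defined by this crate):
+ /// a scorer whose per-call parameter is just a flat penalty in msats could
+ /// implement this as follows.
+ ///
+ /// ```ignore
+ /// struct FlatScorer;
+ /// impl Score for FlatScorer {
+ ///     // The per-routefinding-call parameter: a flat penalty in msats.
+ ///     type ScoreParams = u64;
+ ///     fn channel_penalty_msat(
+ ///         &self, _scid: u64, _source: &NodeId, _target: &NodeId,
+ ///         _usage: ChannelUsage, score_params: &Self::ScoreParams,
+ ///     ) -> u64 {
+ ///         *score_params
+ ///     }
+ ///     // ...remaining `Score` methods elided...
+ /// }
+ /// ```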
+ type ScoreParams;
/// Returns the fee in msats willing to be paid to avoid routing `send_amt_msat` through the
/// given channel in the direction from `source` to `target`.
///
/// [`u64::max_value`] is given to indicate sufficient capacity for the invoice's full amount.
/// Thus, implementations should be overflow-safe.
fn channel_penalty_msat(
- &self, short_channel_id: u64, source: &NodeId, target: &NodeId, usage: ChannelUsage
+ &self, short_channel_id: u64, source: &NodeId, target: &NodeId, usage: ChannelUsage, score_params: &Self::ScoreParams
) -> u64;
/// Handles updating channel penalties after failing to route through a channel.
}
impl<S: Score, T: DerefMut<Target=S> $(+ $supertrait)*> Score for T {
+ type ScoreParams = S::ScoreParams;
fn channel_penalty_msat(
- &self, short_channel_id: u64, source: &NodeId, target: &NodeId, usage: ChannelUsage
+ &self, short_channel_id: u64, source: &NodeId, target: &NodeId, usage: ChannelUsage, score_params: &Self::ScoreParams
) -> u64 {
- self.deref().channel_penalty_msat(short_channel_id, source, target, usage)
+ self.deref().channel_penalty_msat(short_channel_id, source, target, usage, score_params)
}
fn payment_path_failed(&mut self, path: &Path, short_channel_id: u64) {
pub struct MultiThreadedScoreLock<'a, S: Score>(MutexGuard<'a, S>);
#[cfg(c_bindings)]
impl<'a, T: Score + 'a> Score for MultiThreadedScoreLock<'a, T> {
- fn channel_penalty_msat(&self, scid: u64, source: &NodeId, target: &NodeId, usage: ChannelUsage) -> u64 {
- self.0.channel_penalty_msat(scid, source, target, usage)
+ type ScoreParams = <T as Score>::ScoreParams;
+ fn channel_penalty_msat(&self, scid: u64, source: &NodeId, target: &NodeId, usage: ChannelUsage, score_params: &Self::ScoreParams) -> u64 {
+ self.0.channel_penalty_msat(scid, source, target, usage, score_params)
}
fn payment_path_failed(&mut self, path: &Path, short_channel_id: u64) {
self.0.payment_path_failed(path, short_channel_id)
}
impl Score for FixedPenaltyScorer {
- fn channel_penalty_msat(&self, _: u64, _: &NodeId, _: &NodeId, _: ChannelUsage) -> u64 {
+ type ScoreParams = ();
+ fn channel_penalty_msat(&self, _: u64, _: &NodeId, _: &NodeId, _: ChannelUsage, _score_params: &Self::ScoreParams) -> u64 {
self.penalty_msat
}
/// behavior.
///
/// [1]: https://arxiv.org/abs/2107.05322
-/// [`liquidity_penalty_multiplier_msat`]: ProbabilisticScoringParameters::liquidity_penalty_multiplier_msat
-/// [`liquidity_penalty_amount_multiplier_msat`]: ProbabilisticScoringParameters::liquidity_penalty_amount_multiplier_msat
-/// [`liquidity_offset_half_life`]: ProbabilisticScoringParameters::liquidity_offset_half_life
-/// [`historical_liquidity_penalty_multiplier_msat`]: ProbabilisticScoringParameters::historical_liquidity_penalty_multiplier_msat
-/// [`historical_liquidity_penalty_amount_multiplier_msat`]: ProbabilisticScoringParameters::historical_liquidity_penalty_amount_multiplier_msat
+/// [`liquidity_penalty_multiplier_msat`]: ProbabilisticScoringFeeParameters::liquidity_penalty_multiplier_msat
+/// [`liquidity_penalty_amount_multiplier_msat`]: ProbabilisticScoringFeeParameters::liquidity_penalty_amount_multiplier_msat
+/// [`liquidity_offset_half_life`]: ProbabilisticScoringDecayParameters::liquidity_offset_half_life
+/// [`historical_liquidity_penalty_multiplier_msat`]: ProbabilisticScoringFeeParameters::historical_liquidity_penalty_multiplier_msat
+/// [`historical_liquidity_penalty_amount_multiplier_msat`]: ProbabilisticScoringFeeParameters::historical_liquidity_penalty_amount_multiplier_msat
pub type ProbabilisticScorer<G, L> = ProbabilisticScorerUsingTime::<G, L, ConfiguredTime>;
/// Probabilistic [`Score`] implementation.
/// This is not exported to bindings users; generally all users should use the [`ProbabilisticScorer`] type alias.
pub struct ProbabilisticScorerUsingTime<G: Deref<Target = NetworkGraph<L>>, L: Deref, T: Time>
where L::Target: Logger {
- params: ProbabilisticScoringParameters,
+ decay_params: ProbabilisticScoringDecayParameters,
network_graph: G,
logger: L,
// TODO: Remove entries of closed channels.
/// The penalty applied to any channel by the [`ProbabilisticScorer`] is the sum of each of the
/// parameters here.
#[derive(Clone)]
-pub struct ProbabilisticScoringParameters {
+pub struct ProbabilisticScoringFeeParameters {
/// A fixed penalty in msats to apply to each channel.
///
/// Default value: 500 msat
///
/// Default value: 30,000 msat
///
- /// [`liquidity_offset_half_life`]: Self::liquidity_offset_half_life
+ /// [`liquidity_offset_half_life`]: ProbabilisticScoringDecayParameters::liquidity_offset_half_life
pub liquidity_penalty_multiplier_msat: u64,
- /// Whenever this amount of time elapses since the last update to a channel's liquidity bounds,
- /// the distance from the bounds to "zero" is cut in half. In other words, the lower-bound on
- /// the available liquidity is halved and the upper-bound moves half-way to the channel's total
- /// capacity.
- ///
- /// Because halving the liquidity bounds grows the uncertainty on the channel's liquidity,
- /// the penalty for an amount within the new bounds may change. See the [`ProbabilisticScorer`]
- /// struct documentation for more info on the way the liquidity bounds are used.
- ///
- /// For example, if the channel's capacity is 1 million sats, and the current upper and lower
- /// liquidity bounds are 200,000 sats and 600,000 sats, after this amount of time the upper
- /// and lower liquidity bounds will be decayed to 100,000 and 800,000 sats.
- ///
- /// Default value: 6 hours
- ///
- /// # Note
- ///
- /// When built with the `no-std` feature, time will never elapse. Therefore, the channel
- /// liquidity knowledge will never decay except when the bounds cross.
- pub liquidity_offset_half_life: Duration,
-
/// A multiplier used in conjunction with a payment amount and the negative `log10` of the
/// channel's success probability for the payment, as determined by our latest estimates of the
/// channel's liquidity, to determine the amount penalty.
/// [`liquidity_penalty_amount_multiplier_msat`]: Self::liquidity_penalty_amount_multiplier_msat
pub historical_liquidity_penalty_amount_multiplier_msat: u64,
- /// If we aren't learning any new datapoints for a channel, the historical liquidity bounds
- /// tracking can simply live on with increasingly stale data. Instead, when a channel has not
- /// seen a liquidity estimate update for this amount of time, the historical datapoints are
- /// decayed by half.
- ///
- /// Note that after 16 or more half lives all historical data will be completely gone.
- ///
- /// Default value: 14 days
- pub historical_no_updates_half_life: Duration,
-
/// Manual penalties used for the given nodes. Allows to set a particular penalty for a given
/// node. Note that a manual penalty of `u64::max_value()` means the node would not ever be
/// considered during path finding.
pub manual_node_penalties: HashMap<NodeId, u64>,
/// This penalty is applied when `htlc_maximum_msat` is equal to or larger than half of the
- /// channel's capacity, which makes us prefer nodes with a smaller `htlc_maximum_msat`. We
- /// treat such nodes preferentially as this makes balance discovery attacks harder to execute,
- /// thereby creating an incentive to restrict `htlc_maximum_msat` and improve privacy.
+ /// channel's capacity (i.e. `htlc_maximum_msat` ≥ 0.5 * `channel_capacity`), which makes us
+ /// prefer nodes with a smaller `htlc_maximum_msat`. We treat such nodes preferentially
+ /// as this makes balance discovery attacks harder to execute, thereby creating an incentive
+ /// to restrict `htlc_maximum_msat` and improve privacy.
///
/// Default value: 250 msat
pub anti_probing_penalty_msat: u64,
pub considered_impossible_penalty_msat: u64,
}
+impl Default for ProbabilisticScoringFeeParameters {
+ fn default() -> Self {
+ Self {
+ base_penalty_msat: 500,
+ base_penalty_amount_multiplier_msat: 8192,
+ liquidity_penalty_multiplier_msat: 30_000,
+ liquidity_penalty_amount_multiplier_msat: 192,
+ manual_node_penalties: HashMap::new(),
+ anti_probing_penalty_msat: 250,
+ considered_impossible_penalty_msat: 1_0000_0000_000,
+ historical_liquidity_penalty_multiplier_msat: 10_000,
+ historical_liquidity_penalty_amount_multiplier_msat: 64,
+ }
+ }
+}
+
+impl ProbabilisticScoringFeeParameters {
+ /// Marks the node with the given `node_id` as banned, i.e., it will be
+ /// avoided during path finding.
+ pub fn add_banned(&mut self, node_id: &NodeId) {
+ self.manual_node_penalties.insert(*node_id, u64::max_value());
+ }
+
+ /// Marks all nodes in the given list as banned, i.e.,
+ /// they will be avoided during path finding.
+ pub fn add_banned_from_list(&mut self, node_ids: Vec<NodeId>) {
+ for id in node_ids {
+ self.manual_node_penalties.insert(id, u64::max_value());
+ }
+ }
+
+ /// Removes the node with the given `node_id` from the list of nodes to avoid.
+ pub fn remove_banned(&mut self, node_id: &NodeId) {
+ self.manual_node_penalties.remove(node_id);
+ }
+
+ /// Sets a manual penalty for the given node.
+ pub fn set_manual_penalty(&mut self, node_id: &NodeId, penalty: u64) {
+ self.manual_node_penalties.insert(*node_id, penalty);
+ }
+
+ /// Removes the node with the given `node_id` from the list of manual penalties.
+ pub fn remove_manual_penalty(&mut self, node_id: &NodeId) {
+ self.manual_node_penalties.remove(node_id);
+ }
+
+ /// Clears the list of manual penalties that are applied during path finding.
+ pub fn clear_manual_penalties(&mut self) {
+ self.manual_node_penalties = HashMap::new();
+ }
+}
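+
+// Illustrative use of the manual-penalty knobs above (a sketch only; assumes
+// some `node_id: NodeId` is already in scope):
+//
+//     let mut params = ProbabilisticScoringFeeParameters::default();
+//     params.add_banned(&node_id);                // avoid the node entirely
+//     params.set_manual_penalty(&node_id, 1_000); // ...or merely discourage it
+//     params.remove_banned(&node_id);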
+
+#[cfg(test)]
+impl ProbabilisticScoringFeeParameters {
+ fn zero_penalty() -> Self {
+ Self {
+ base_penalty_msat: 0,
+ base_penalty_amount_multiplier_msat: 0,
+ liquidity_penalty_multiplier_msat: 0,
+ liquidity_penalty_amount_multiplier_msat: 0,
+ historical_liquidity_penalty_multiplier_msat: 0,
+ historical_liquidity_penalty_amount_multiplier_msat: 0,
+ manual_node_penalties: HashMap::new(),
+ anti_probing_penalty_msat: 0,
+ considered_impossible_penalty_msat: 0,
+ }
+ }
+}
+
+/// Parameters for configuring [`ProbabilisticScorer`].
+///
+/// Used to configure decay parameters that are static throughout the lifetime of the scorer.
+/// These decay parameters affect the computed channel penalty but are not changed on a
+/// per-routefinding-call basis.
+#[derive(Copy, Clone)]
+pub struct ProbabilisticScoringDecayParameters {
+ /// If we aren't learning any new datapoints for a channel, the historical liquidity bounds
+ /// tracking can simply live on with increasingly stale data. Instead, when a channel has not
+ /// seen a liquidity estimate update for this amount of time, the historical datapoints are
+ /// decayed by half.
+ /// For an example of `historical_no_updates_half_life` in use, see
+ /// [`historical_estimated_channel_liquidity_probabilities`].
+ ///
+ /// Note that after 16 or more half lives all historical data will be completely gone.
+ ///
+ /// Default value: 14 days
+ ///
+ /// [`historical_estimated_channel_liquidity_probabilities`]: ProbabilisticScorerUsingTime::historical_estimated_channel_liquidity_probabilities
+ pub historical_no_updates_half_life: Duration,
+
+ /// Whenever this amount of time elapses since the last update to a channel's liquidity bounds,
+ /// the distance from the bounds to "zero" is cut in half. In other words, the lower-bound on
+ /// the available liquidity is halved and the upper-bound moves half-way to the channel's total
+ /// capacity.
+ ///
+ /// Because halving the liquidity bounds grows the uncertainty on the channel's liquidity,
+ /// the penalty for an amount within the new bounds may change. See the [`ProbabilisticScorer`]
+ /// struct documentation for more info on the way the liquidity bounds are used.
+ ///
+ /// For example, if the channel's capacity is 1 million sats, and the current upper and lower
+ /// liquidity bounds are 200,000 sats and 600,000 sats, after this amount of time the upper
+ /// and lower liquidity bounds will be decayed to 100,000 and 800,000 sats.
+ ///
+ /// Default value: 6 hours
+ ///
+ /// # Note
+ ///
+ /// When built with the `no-std` feature, time will never elapse. Therefore, the channel
+ /// liquidity knowledge will never decay except when the bounds cross.
+ pub liquidity_offset_half_life: Duration,
+}
+
+impl Default for ProbabilisticScoringDecayParameters {
+ fn default() -> Self {
+ Self {
+ liquidity_offset_half_life: Duration::from_secs(6 * 60 * 60),
+ historical_no_updates_half_life: Duration::from_secs(60 * 60 * 24 * 14),
+ }
+ }
+}
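+
+// A sketch of overriding one decay parameter while keeping the other at its
+// default (the one-hour value is illustrative, not a recommendation):
+//
+//     let decay_params = ProbabilisticScoringDecayParameters {
+//         liquidity_offset_half_life: Duration::from_secs(60 * 60),
+//         ..ProbabilisticScoringDecayParameters::default()
+//     };
+//     let scorer = ProbabilisticScorer::new(decay_params, &network_graph, &logger);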
+
+#[cfg(test)]
+impl ProbabilisticScoringDecayParameters {
+ fn zero_penalty() -> Self {
+ Self {
+ liquidity_offset_half_life: Duration::from_secs(6 * 60 * 60),
+ historical_no_updates_half_life: Duration::from_secs(60 * 60 * 24 * 14),
+ }
+ }
+}
+
/// Tracks the historical state of a distribution as a weighted average of how much time was spent
/// in each of 8 buckets.
#[derive(Clone, Copy)]
/// A snapshot of [`ChannelLiquidity`] in one direction assuming a certain channel capacity and
/// decayed with a given half life.
-struct DirectedChannelLiquidity<'a, L: Deref<Target = u64>, BRT: Deref<Target = HistoricalBucketRangeTracker>, T: Time, U: Deref<Target = T>> {
+struct DirectedChannelLiquidity<L: Deref<Target = u64>, BRT: Deref<Target = HistoricalBucketRangeTracker>, T: Time, U: Deref<Target = T>> {
min_liquidity_offset_msat: L,
max_liquidity_offset_msat: L,
min_liquidity_offset_history: BRT,
capacity_msat: u64,
last_updated: U,
now: T,
- params: &'a ProbabilisticScoringParameters,
+ decay_params: ProbabilisticScoringDecayParameters,
}
impl<G: Deref<Target = NetworkGraph<L>>, L: Deref, T: Time> ProbabilisticScorerUsingTime<G, L, T> where L::Target: Logger {
/// Creates a new scorer using the given scoring parameters for sending payments from a node
/// through a network graph.
- pub fn new(params: ProbabilisticScoringParameters, network_graph: G, logger: L) -> Self {
+ pub fn new(decay_params: ProbabilisticScoringDecayParameters, network_graph: G, logger: L) -> Self {
Self {
- params,
+ decay_params,
network_graph,
logger,
channel_liquidities: HashMap::new(),
let log_direction = |source, target| {
if let Some((directed_info, _)) = chan_debug.as_directed_to(target) {
let amt = directed_info.effective_capacity().as_msat();
- let dir_liq = liq.as_directed(source, target, 0, amt, &self.params);
+ let dir_liq = liq.as_directed(source, target, 0, amt, self.decay_params);
let buckets = HistoricalMinMaxBuckets {
min_liquidity_offset_history: &dir_liq.min_liquidity_offset_history,
max_liquidity_offset_history: &dir_liq.max_liquidity_offset_history,
};
let (min_buckets, max_buckets, _) = buckets.get_decayed_buckets(now,
- *dir_liq.last_updated, self.params.historical_no_updates_half_life);
+ *dir_liq.last_updated, self.decay_params.historical_no_updates_half_life);
log_debug!(self.logger, core::concat!(
"Liquidity from {} to {} via {} is in the range ({}, {}).\n",
if let Some(liq) = self.channel_liquidities.get(&scid) {
if let Some((directed_info, source)) = chan.as_directed_to(target) {
let amt = directed_info.effective_capacity().as_msat();
- let dir_liq = liq.as_directed(source, target, 0, amt, &self.params);
+ let dir_liq = liq.as_directed(source, target, 0, amt, self.decay_params);
return Some((dir_liq.min_liquidity_msat(), dir_liq.max_liquidity_msat()));
}
}
if let Some(liq) = self.channel_liquidities.get(&scid) {
if let Some((directed_info, source)) = chan.as_directed_to(target) {
let amt = directed_info.effective_capacity().as_msat();
- let dir_liq = liq.as_directed(source, target, 0, amt, &self.params);
+ let dir_liq = liq.as_directed(source, target, 0, amt, self.decay_params);
let buckets = HistoricalMinMaxBuckets {
min_liquidity_offset_history: &dir_liq.min_liquidity_offset_history,
max_liquidity_offset_history: &dir_liq.max_liquidity_offset_history,
};
let (min_buckets, mut max_buckets, _) = buckets.get_decayed_buckets(T::now(),
- *dir_liq.last_updated, self.params.historical_no_updates_half_life);
+ *dir_liq.last_updated, self.decay_params.historical_no_updates_half_life);
// Note that the liquidity buckets are an offset from the edge, so we inverse
// the max order to get the probabilities from zero.
max_buckets.reverse();
}
None
}
-
- /// Marks the node with the given `node_id` as banned, i.e.,
- /// it will be avoided during path finding.
- pub fn add_banned(&mut self, node_id: &NodeId) {
- self.params.manual_node_penalties.insert(*node_id, u64::max_value());
- }
-
- /// Removes the node with the given `node_id` from the list of nodes to avoid.
- pub fn remove_banned(&mut self, node_id: &NodeId) {
- self.params.manual_node_penalties.remove(node_id);
- }
-
- /// Sets a manual penalty for the given node.
- pub fn set_manual_penalty(&mut self, node_id: &NodeId, penalty: u64) {
- self.params.manual_node_penalties.insert(*node_id, penalty);
- }
-
- /// Removes the node with the given `node_id` from the list of manual penalties.
- pub fn remove_manual_penalty(&mut self, node_id: &NodeId) {
- self.params.manual_node_penalties.remove(node_id);
- }
-
- /// Clears the list of manual penalties that are applied during path finding.
- pub fn clear_manual_penalties(&mut self) {
- self.params.manual_node_penalties = HashMap::new();
- }
-}
-
-impl ProbabilisticScoringParameters {
- #[cfg(test)]
- fn zero_penalty() -> Self {
- Self {
- base_penalty_msat: 0,
- base_penalty_amount_multiplier_msat: 0,
- liquidity_penalty_multiplier_msat: 0,
- liquidity_offset_half_life: Duration::from_secs(6 * 60 * 60),
- liquidity_penalty_amount_multiplier_msat: 0,
- historical_liquidity_penalty_multiplier_msat: 0,
- historical_liquidity_penalty_amount_multiplier_msat: 0,
- historical_no_updates_half_life: Duration::from_secs(60 * 60 * 24 * 14),
- manual_node_penalties: HashMap::new(),
- anti_probing_penalty_msat: 0,
- considered_impossible_penalty_msat: 0,
- }
- }
-
- /// Marks all nodes in the given list as banned, i.e.,
- /// they will be avoided during path finding.
- pub fn add_banned_from_list(&mut self, node_ids: Vec<NodeId>) {
- for id in node_ids {
- self.manual_node_penalties.insert(id, u64::max_value());
- }
- }
-}
-
-impl Default for ProbabilisticScoringParameters {
- fn default() -> Self {
- Self {
- base_penalty_msat: 500,
- base_penalty_amount_multiplier_msat: 8192,
- liquidity_penalty_multiplier_msat: 30_000,
- liquidity_offset_half_life: Duration::from_secs(6 * 60 * 60),
- liquidity_penalty_amount_multiplier_msat: 192,
- historical_liquidity_penalty_multiplier_msat: 10_000,
- historical_liquidity_penalty_amount_multiplier_msat: 64,
- historical_no_updates_half_life: Duration::from_secs(60 * 60 * 24 * 14),
- manual_node_penalties: HashMap::new(),
- anti_probing_penalty_msat: 250,
- considered_impossible_penalty_msat: 1_0000_0000_000,
- }
- }
}
impl<T: Time> ChannelLiquidity<T> {
/// Returns a view of the channel liquidity directed from `source` to `target` assuming
/// `capacity_msat`.
- fn as_directed<'a>(
- &self, source: &NodeId, target: &NodeId, inflight_htlc_msat: u64, capacity_msat: u64,
- params: &'a ProbabilisticScoringParameters
- ) -> DirectedChannelLiquidity<'a, &u64, &HistoricalBucketRangeTracker, T, &T> {
+ fn as_directed(
+ &self, source: &NodeId, target: &NodeId, inflight_htlc_msat: u64, capacity_msat: u64, decay_params: ProbabilisticScoringDecayParameters
+ ) -> DirectedChannelLiquidity<&u64, &HistoricalBucketRangeTracker, T, &T> {
let (min_liquidity_offset_msat, max_liquidity_offset_msat, min_liquidity_offset_history, max_liquidity_offset_history) =
if source < target {
(&self.min_liquidity_offset_msat, &self.max_liquidity_offset_msat,
capacity_msat,
last_updated: &self.last_updated,
now: T::now(),
- params,
+ decay_params,
}
}
/// Returns a mutable view of the channel liquidity directed from `source` to `target` assuming
/// `capacity_msat`.
- fn as_directed_mut<'a>(
- &mut self, source: &NodeId, target: &NodeId, inflight_htlc_msat: u64, capacity_msat: u64,
- params: &'a ProbabilisticScoringParameters
- ) -> DirectedChannelLiquidity<'a, &mut u64, &mut HistoricalBucketRangeTracker, T, &mut T> {
+ fn as_directed_mut(
+ &mut self, source: &NodeId, target: &NodeId, inflight_htlc_msat: u64, capacity_msat: u64, decay_params: ProbabilisticScoringDecayParameters
+ ) -> DirectedChannelLiquidity<&mut u64, &mut HistoricalBucketRangeTracker, T, &mut T> {
let (min_liquidity_offset_msat, max_liquidity_offset_msat, min_liquidity_offset_history, max_liquidity_offset_history) =
if source < target {
(&mut self.min_liquidity_offset_msat, &mut self.max_liquidity_offset_msat,
capacity_msat,
last_updated: &mut self.last_updated,
now: T::now(),
- params,
+ decay_params,
}
}
}
const AMOUNT_PENALTY_DIVISOR: u64 = 1 << 20;
const BASE_AMOUNT_PENALTY_DIVISOR: u64 = 1 << 30;
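+// Divisors scaling the amount-dependent penalty terms: the liquidity
+// amount-multiplier term is divided by 2^20 and the base-penalty
+// amount-multiplier term by 2^30 (see `combined_penalty_msat` and
+// `channel_penalty_msat`), keeping per-msat multipliers in a usable range.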
-impl<L: Deref<Target = u64>, BRT: Deref<Target = HistoricalBucketRangeTracker>, T: Time, U: Deref<Target = T>> DirectedChannelLiquidity<'_, L, BRT, T, U> {
+impl<L: Deref<Target = u64>, BRT: Deref<Target = HistoricalBucketRangeTracker>, T: Time, U: Deref<Target = T>> DirectedChannelLiquidity<L, BRT, T, U> {
/// Returns a liquidity penalty for routing the given HTLC `amount_msat` through the channel in
/// this direction.
- fn penalty_msat(&self, amount_msat: u64, params: &ProbabilisticScoringParameters) -> u64 {
+ fn penalty_msat(&self, amount_msat: u64, score_params: &ProbabilisticScoringFeeParameters) -> u64 {
let max_liquidity_msat = self.max_liquidity_msat();
let min_liquidity_msat = core::cmp::min(self.min_liquidity_msat(), max_liquidity_msat);
// impossibility penalty.
let negative_log10_times_2048 = NEGATIVE_LOG10_UPPER_BOUND * 2048;
Self::combined_penalty_msat(amount_msat, negative_log10_times_2048,
- params.liquidity_penalty_multiplier_msat,
- params.liquidity_penalty_amount_multiplier_msat)
- .saturating_add(params.considered_impossible_penalty_msat)
+ score_params.liquidity_penalty_multiplier_msat,
+ score_params.liquidity_penalty_amount_multiplier_msat)
+ .saturating_add(score_params.considered_impossible_penalty_msat)
} else {
let numerator = (max_liquidity_msat - amount_msat).saturating_add(1);
let denominator = (max_liquidity_msat - min_liquidity_msat).saturating_add(1);
let negative_log10_times_2048 =
approx::negative_log10_times_2048(numerator, denominator);
Self::combined_penalty_msat(amount_msat, negative_log10_times_2048,
- params.liquidity_penalty_multiplier_msat,
- params.liquidity_penalty_amount_multiplier_msat)
+ score_params.liquidity_penalty_multiplier_msat,
+ score_params.liquidity_penalty_amount_multiplier_msat)
}
};
- if params.historical_liquidity_penalty_multiplier_msat != 0 ||
- params.historical_liquidity_penalty_amount_multiplier_msat != 0 {
+ if score_params.historical_liquidity_penalty_multiplier_msat != 0 ||
+ score_params.historical_liquidity_penalty_amount_multiplier_msat != 0 {
let payment_amt_64th_bucket = if amount_msat < u64::max_value() / 64 {
amount_msat * 64 / self.capacity_msat.saturating_add(1)
} else {
};
if let Some(cumulative_success_prob_times_billion) = buckets
.calculate_success_probability_times_billion(self.now, *self.last_updated,
- params.historical_no_updates_half_life, payment_amt_64th_bucket as u8)
+ self.decay_params.historical_no_updates_half_life, payment_amt_64th_bucket as u8)
{
let historical_negative_log10_times_2048 = approx::negative_log10_times_2048(cumulative_success_prob_times_billion + 1, 1024 * 1024 * 1024);
res = res.saturating_add(Self::combined_penalty_msat(amount_msat,
- historical_negative_log10_times_2048, params.historical_liquidity_penalty_multiplier_msat,
- params.historical_liquidity_penalty_amount_multiplier_msat));
+ historical_negative_log10_times_2048, score_params.historical_liquidity_penalty_multiplier_msat,
+ score_params.historical_liquidity_penalty_amount_multiplier_msat));
} else {
// If we don't have any valid points (or, once decayed, we have less than a full
// point), redo the non-historical calculation with no liquidity bounds tracked and
let negative_log10_times_2048 =
approx::negative_log10_times_2048(numerator, denominator);
res = res.saturating_add(Self::combined_penalty_msat(amount_msat, negative_log10_times_2048,
- params.historical_liquidity_penalty_multiplier_msat,
- params.historical_liquidity_penalty_amount_multiplier_msat));
+ score_params.historical_liquidity_penalty_multiplier_msat,
+ score_params.historical_liquidity_penalty_amount_multiplier_msat));
}
}
fn decayed_offset_msat(&self, offset_msat: u64) -> u64 {
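+ // Each elapsed half-life halves the offset via a right shift: e.g. with the
+ // default 6-hour half-life, an offset of 600_000 msat last updated 12 hours
+ // ago decays to 600_000 >> 2 = 150_000 msat.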
self.now.duration_since(*self.last_updated).as_secs()
- .checked_div(self.params.liquidity_offset_half_life.as_secs())
+ .checked_div(self.decay_params.liquidity_offset_half_life.as_secs())
.and_then(|decays| offset_msat.checked_shr(decays as u32))
.unwrap_or(0)
}
}
-impl<L: DerefMut<Target = u64>, BRT: DerefMut<Target = HistoricalBucketRangeTracker>, T: Time, U: DerefMut<Target = T>> DirectedChannelLiquidity<'_, L, BRT, T, U> {
+impl<L: DerefMut<Target = u64>, BRT: DerefMut<Target = HistoricalBucketRangeTracker>, T: Time, U: DerefMut<Target = T>> DirectedChannelLiquidity<L, BRT, T, U> {
/// Adjusts the channel liquidity balance bounds when failing to route `amount_msat`.
fn failed_at_channel<Log: Deref>(&mut self, amount_msat: u64, chan_descr: fmt::Arguments, logger: &Log) where Log::Target: Logger {
let existing_max_msat = self.max_liquidity_msat();
fn update_history_buckets(&mut self) {
let half_lives = self.now.duration_since(*self.last_updated).as_secs()
- .checked_div(self.params.historical_no_updates_half_life.as_secs())
+ .checked_div(self.decay_params.historical_no_updates_half_life.as_secs())
.map(|v| v.try_into().unwrap_or(u32::max_value())).unwrap_or(u32::max_value());
self.min_liquidity_offset_history.time_decay_data(half_lives);
self.max_liquidity_offset_history.time_decay_data(half_lives);
}
impl<G: Deref<Target = NetworkGraph<L>>, L: Deref, T: Time> Score for ProbabilisticScorerUsingTime<G, L, T> where L::Target: Logger {
+ type ScoreParams = ProbabilisticScoringFeeParameters;
fn channel_penalty_msat(
- &self, short_channel_id: u64, source: &NodeId, target: &NodeId, usage: ChannelUsage
+ &self, short_channel_id: u64, source: &NodeId, target: &NodeId, usage: ChannelUsage, score_params: &ProbabilisticScoringFeeParameters
) -> u64 {
- if let Some(penalty) = self.params.manual_node_penalties.get(target) {
+ if let Some(penalty) = score_params.manual_node_penalties.get(target) {
return *penalty;
}
- let base_penalty_msat = self.params.base_penalty_msat.saturating_add(
- self.params.base_penalty_amount_multiplier_msat
+ let base_penalty_msat = score_params.base_penalty_msat.saturating_add(
+ score_params.base_penalty_amount_multiplier_msat
.saturating_mul(usage.amount_msat) / BASE_AMOUNT_PENALTY_DIVISOR);
let mut anti_probing_penalty_msat = 0;
},
EffectiveCapacity::Total { capacity_msat, htlc_maximum_msat } => {
if htlc_maximum_msat >= capacity_msat/2 {
- anti_probing_penalty_msat = self.params.anti_probing_penalty_msat;
+ anti_probing_penalty_msat = score_params.anti_probing_penalty_msat;
}
},
_ => {},
self.channel_liquidities
.get(&short_channel_id)
.unwrap_or(&ChannelLiquidity::new())
- .as_directed(source, target, inflight_htlc_msat, capacity_msat, &self.params)
- .penalty_msat(amount_msat, &self.params)
+ .as_directed(source, target, inflight_htlc_msat, capacity_msat, self.decay_params)
+ .penalty_msat(amount_msat, score_params)
.saturating_add(anti_probing_penalty_msat)
.saturating_add(base_penalty_msat)
}
self.channel_liquidities
.entry(hop.short_channel_id)
.or_insert_with(ChannelLiquidity::new)
- .as_directed_mut(source, &target, 0, capacity_msat, &self.params)
+ .as_directed_mut(source, &target, 0, capacity_msat, self.decay_params)
.failed_at_channel(amount_msat, format_args!("SCID {}, towards {:?}", hop.short_channel_id, target), &self.logger);
} else {
self.channel_liquidities
.entry(hop.short_channel_id)
.or_insert_with(ChannelLiquidity::new)
- .as_directed_mut(source, &target, 0, capacity_msat, &self.params)
+ .as_directed_mut(source, &target, 0, capacity_msat, self.decay_params)
.failed_downstream(amount_msat, format_args!("SCID {}, towards {:?}", hop.short_channel_id, target), &self.logger);
}
} else {
self.channel_liquidities
.entry(hop.short_channel_id)
.or_insert_with(ChannelLiquidity::new)
- .as_directed_mut(source, &target, 0, capacity_msat, &self.params)
+ .as_directed_mut(source, &target, 0, capacity_msat, self.decay_params)
.successful(amount_msat, format_args!("SCID {}, towards {:?}", hop.short_channel_id, target), &self.logger);
} else {
log_debug!(self.logger, "Not able to learn for channel with SCID {} as we do not have graph info for it (likely a route-hint last-hop).",
}
impl<G: Deref<Target = NetworkGraph<L>>, L: Deref, T: Time>
-ReadableArgs<(ProbabilisticScoringParameters, G, L)> for ProbabilisticScorerUsingTime<G, L, T> where L::Target: Logger {
+ReadableArgs<(ProbabilisticScoringDecayParameters, G, L)> for ProbabilisticScorerUsingTime<G, L, T> where L::Target: Logger {
#[inline]
fn read<R: Read>(
- r: &mut R, args: (ProbabilisticScoringParameters, G, L)
+ r: &mut R, args: (ProbabilisticScoringDecayParameters, G, L)
) -> Result<Self, DecodeError> {
- let (params, network_graph, logger) = args;
+ let (decay_params, network_graph, logger) = args;
let mut channel_liquidities = HashMap::new();
read_tlv_fields!(r, {
(0, channel_liquidities, required),
});
Ok(Self {
- params,
+ decay_params,
network_graph,
logger,
channel_liquidities,
#[cfg(test)]
mod tests {
- use super::{ChannelLiquidity, HistoricalBucketRangeTracker, ProbabilisticScoringParameters, ProbabilisticScorerUsingTime};
+ use super::{ChannelLiquidity, HistoricalBucketRangeTracker, ProbabilisticScoringFeeParameters, ProbabilisticScoringDecayParameters, ProbabilisticScorerUsingTime};
use crate::blinded_path::{BlindedHop, BlindedPath};
use crate::util::config::UserConfig;
use crate::util::time::Time;
let logger = TestLogger::new();
let last_updated = SinceEpoch::now();
let network_graph = network_graph(&logger);
- let params = ProbabilisticScoringParameters::default();
- let mut scorer = ProbabilisticScorer::new(params, &network_graph, &logger)
+ let decay_params = ProbabilisticScoringDecayParameters::default();
+ let mut scorer = ProbabilisticScorer::new(decay_params, &network_graph, &logger)
.with_channel(42,
ChannelLiquidity {
min_liquidity_offset_msat: 700, max_liquidity_offset_msat: 100, last_updated,
// Update minimum liquidity.
let liquidity = scorer.channel_liquidities.get(&42).unwrap()
- .as_directed(&source, &target, 0, 1_000, &scorer.params);
+ .as_directed(&source, &target, 0, 1_000, decay_params);
assert_eq!(liquidity.min_liquidity_msat(), 100);
assert_eq!(liquidity.max_liquidity_msat(), 300);
let liquidity = scorer.channel_liquidities.get(&42).unwrap()
- .as_directed(&target, &source, 0, 1_000, &scorer.params);
+ .as_directed(&target, &source, 0, 1_000, decay_params);
assert_eq!(liquidity.min_liquidity_msat(), 700);
assert_eq!(liquidity.max_liquidity_msat(), 900);
scorer.channel_liquidities.get_mut(&42).unwrap()
- .as_directed_mut(&source, &target, 0, 1_000, &scorer.params)
+ .as_directed_mut(&source, &target, 0, 1_000, decay_params)
.set_min_liquidity_msat(200);
let liquidity = scorer.channel_liquidities.get(&42).unwrap()
- .as_directed(&source, &target, 0, 1_000, &scorer.params);
+ .as_directed(&source, &target, 0, 1_000, decay_params);
assert_eq!(liquidity.min_liquidity_msat(), 200);
assert_eq!(liquidity.max_liquidity_msat(), 300);
let liquidity = scorer.channel_liquidities.get(&42).unwrap()
- .as_directed(&target, &source, 0, 1_000, &scorer.params);
+ .as_directed(&target, &source, 0, 1_000, decay_params);
assert_eq!(liquidity.min_liquidity_msat(), 700);
assert_eq!(liquidity.max_liquidity_msat(), 800);
// Update maximum liquidity.
let liquidity = scorer.channel_liquidities.get(&43).unwrap()
- .as_directed(&target, &recipient, 0, 1_000, &scorer.params);
+ .as_directed(&target, &recipient, 0, 1_000, decay_params);
assert_eq!(liquidity.min_liquidity_msat(), 700);
assert_eq!(liquidity.max_liquidity_msat(), 900);
let liquidity = scorer.channel_liquidities.get(&43).unwrap()
- .as_directed(&recipient, &target, 0, 1_000, &scorer.params);
+ .as_directed(&recipient, &target, 0, 1_000, decay_params);
assert_eq!(liquidity.min_liquidity_msat(), 100);
assert_eq!(liquidity.max_liquidity_msat(), 300);
scorer.channel_liquidities.get_mut(&43).unwrap()
- .as_directed_mut(&target, &recipient, 0, 1_000, &scorer.params)
+ .as_directed_mut(&target, &recipient, 0, 1_000, decay_params)
.set_max_liquidity_msat(200);
let liquidity = scorer.channel_liquidities.get(&43).unwrap()
- .as_directed(&target, &recipient, 0, 1_000, &scorer.params);
+ .as_directed(&target, &recipient, 0, 1_000, decay_params);
assert_eq!(liquidity.min_liquidity_msat(), 0);
assert_eq!(liquidity.max_liquidity_msat(), 200);
let liquidity = scorer.channel_liquidities.get(&43).unwrap()
- .as_directed(&recipient, &target, 0, 1_000, &scorer.params);
+ .as_directed(&recipient, &target, 0, 1_000, decay_params);
assert_eq!(liquidity.min_liquidity_msat(), 800);
assert_eq!(liquidity.max_liquidity_msat(), 1000);
}
let logger = TestLogger::new();
let last_updated = SinceEpoch::now();
let network_graph = network_graph(&logger);
- let params = ProbabilisticScoringParameters::default();
- let mut scorer = ProbabilisticScorer::new(params, &network_graph, &logger)
+ let decay_params = ProbabilisticScoringDecayParameters::default();
+ let mut scorer = ProbabilisticScorer::new(decay_params, &network_graph, &logger)
.with_channel(42,
ChannelLiquidity {
min_liquidity_offset_msat: 200, max_liquidity_offset_msat: 400, last_updated,
// Check initial bounds.
let liquidity = scorer.channel_liquidities.get(&42).unwrap()
- .as_directed(&source, &target, 0, 1_000, &scorer.params);
+ .as_directed(&source, &target, 0, 1_000, decay_params);
assert_eq!(liquidity.min_liquidity_msat(), 400);
assert_eq!(liquidity.max_liquidity_msat(), 800);
let liquidity = scorer.channel_liquidities.get(&42).unwrap()
- .as_directed(&target, &source, 0, 1_000, &scorer.params);
+ .as_directed(&target, &source, 0, 1_000, decay_params);
assert_eq!(liquidity.min_liquidity_msat(), 200);
assert_eq!(liquidity.max_liquidity_msat(), 600);
// Reset from source to target.
scorer.channel_liquidities.get_mut(&42).unwrap()
- .as_directed_mut(&source, &target, 0, 1_000, &scorer.params)
+ .as_directed_mut(&source, &target, 0, 1_000, decay_params)
.set_min_liquidity_msat(900);
let liquidity = scorer.channel_liquidities.get(&42).unwrap()
- .as_directed(&source, &target, 0, 1_000, &scorer.params);
+ .as_directed(&source, &target, 0, 1_000, decay_params);
assert_eq!(liquidity.min_liquidity_msat(), 900);
assert_eq!(liquidity.max_liquidity_msat(), 1_000);
let liquidity = scorer.channel_liquidities.get(&42).unwrap()
- .as_directed(&target, &source, 0, 1_000, &scorer.params);
+ .as_directed(&target, &source, 0, 1_000, decay_params);
assert_eq!(liquidity.min_liquidity_msat(), 0);
assert_eq!(liquidity.max_liquidity_msat(), 100);
// Reset from target to source.
scorer.channel_liquidities.get_mut(&42).unwrap()
- .as_directed_mut(&target, &source, 0, 1_000, &scorer.params)
+ .as_directed_mut(&target, &source, 0, 1_000, decay_params)
.set_min_liquidity_msat(400);
let liquidity = scorer.channel_liquidities.get(&42).unwrap()
- .as_directed(&source, &target, 0, 1_000, &scorer.params);
+ .as_directed(&source, &target, 0, 1_000, decay_params);
assert_eq!(liquidity.min_liquidity_msat(), 0);
assert_eq!(liquidity.max_liquidity_msat(), 600);
let liquidity = scorer.channel_liquidities.get(&42).unwrap()
- .as_directed(&target, &source, 0, 1_000, &scorer.params);
+ .as_directed(&target, &source, 0, 1_000, decay_params);
assert_eq!(liquidity.min_liquidity_msat(), 400);
assert_eq!(liquidity.max_liquidity_msat(), 1_000);
}
let logger = TestLogger::new();
let last_updated = SinceEpoch::now();
let network_graph = network_graph(&logger);
- let params = ProbabilisticScoringParameters::default();
- let mut scorer = ProbabilisticScorer::new(params, &network_graph, &logger)
+ let decay_params = ProbabilisticScoringDecayParameters::default();
+ let mut scorer = ProbabilisticScorer::new(decay_params, &network_graph, &logger)
.with_channel(42,
ChannelLiquidity {
min_liquidity_offset_msat: 200, max_liquidity_offset_msat: 400, last_updated,
// Check initial bounds.
let liquidity = scorer.channel_liquidities.get(&42).unwrap()
- .as_directed(&source, &target, 0, 1_000, &scorer.params);
+ .as_directed(&source, &target, 0, 1_000, decay_params);
assert_eq!(liquidity.min_liquidity_msat(), 400);
assert_eq!(liquidity.max_liquidity_msat(), 800);
let liquidity = scorer.channel_liquidities.get(&42).unwrap()
- .as_directed(&target, &source, 0, 1_000, &scorer.params);
+ .as_directed(&target, &source, 0, 1_000, decay_params);
assert_eq!(liquidity.min_liquidity_msat(), 200);
assert_eq!(liquidity.max_liquidity_msat(), 600);
// Reset from source to target.
scorer.channel_liquidities.get_mut(&42).unwrap()
- .as_directed_mut(&source, &target, 0, 1_000, &scorer.params)
+ .as_directed_mut(&source, &target, 0, 1_000, decay_params)
.set_max_liquidity_msat(300);
let liquidity = scorer.channel_liquidities.get(&42).unwrap()
- .as_directed(&source, &target, 0, 1_000, &scorer.params);
+ .as_directed(&source, &target, 0, 1_000, decay_params);
assert_eq!(liquidity.min_liquidity_msat(), 0);
assert_eq!(liquidity.max_liquidity_msat(), 300);
let liquidity = scorer.channel_liquidities.get(&42).unwrap()
- .as_directed(&target, &source, 0, 1_000, &scorer.params);
+ .as_directed(&target, &source, 0, 1_000, decay_params);
assert_eq!(liquidity.min_liquidity_msat(), 700);
assert_eq!(liquidity.max_liquidity_msat(), 1_000);
// Reset from target to source.
scorer.channel_liquidities.get_mut(&42).unwrap()
- .as_directed_mut(&target, &source, 0, 1_000, &scorer.params)
+ .as_directed_mut(&target, &source, 0, 1_000, decay_params)
.set_max_liquidity_msat(600);
let liquidity = scorer.channel_liquidities.get(&42).unwrap()
- .as_directed(&source, &target, 0, 1_000, &scorer.params);
+ .as_directed(&source, &target, 0, 1_000, decay_params);
assert_eq!(liquidity.min_liquidity_msat(), 400);
assert_eq!(liquidity.max_liquidity_msat(), 1_000);
let liquidity = scorer.channel_liquidities.get(&42).unwrap()
- .as_directed(&target, &source, 0, 1_000, &scorer.params);
+ .as_directed(&target, &source, 0, 1_000, decay_params);
assert_eq!(liquidity.min_liquidity_msat(), 0);
assert_eq!(liquidity.max_liquidity_msat(), 600);
}
fn increased_penalty_nearing_liquidity_upper_bound() {
let logger = TestLogger::new();
let network_graph = network_graph(&logger);
- let params = ProbabilisticScoringParameters {
+ let params = ProbabilisticScoringFeeParameters {
liquidity_penalty_multiplier_msat: 1_000,
- ..ProbabilisticScoringParameters::zero_penalty()
+ ..ProbabilisticScoringFeeParameters::zero_penalty()
};
- let scorer = ProbabilisticScorer::new(params, &network_graph, &logger);
+ let decay_params = ProbabilisticScoringDecayParameters::default();
+ let scorer = ProbabilisticScorer::new(decay_params, &network_graph, &logger);
let source = source_node_id();
let target = target_node_id();
inflight_htlc_msat: 0,
effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024_000, htlc_maximum_msat: 1_000 },
};
- assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 0);
+ assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, ¶ms), 0);
let usage = ChannelUsage { amount_msat: 10_240, ..usage };
- assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 0);
+ assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, ¶ms), 0);
let usage = ChannelUsage { amount_msat: 102_400, ..usage };
- assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 47);
+ assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, ¶ms), 47);
let usage = ChannelUsage { amount_msat: 1_023_999, ..usage };
- assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 2_000);
+ assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, ¶ms), 2_000);
let usage = ChannelUsage {
amount_msat: 128,
inflight_htlc_msat: 0,
effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024, htlc_maximum_msat: 1_000 },
};
- assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 58);
+ assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, ¶ms), 58);
let usage = ChannelUsage { amount_msat: 256, ..usage };
- assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 125);
+ assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, ¶ms), 125);
let usage = ChannelUsage { amount_msat: 374, ..usage };
- assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 198);
+ assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, ¶ms), 198);
let usage = ChannelUsage { amount_msat: 512, ..usage };
- assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 300);
+ assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, ¶ms), 300);
let usage = ChannelUsage { amount_msat: 640, ..usage };
- assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 425);
+ assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, ¶ms), 425);
let usage = ChannelUsage { amount_msat: 768, ..usage };
- assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 602);
+ assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, ¶ms), 602);
let usage = ChannelUsage { amount_msat: 896, ..usage };
- assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 902);
+ assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, ¶ms), 902);
}
#[test]
let logger = TestLogger::new();
let last_updated = SinceEpoch::now();
let network_graph = network_graph(&logger);
- let params = ProbabilisticScoringParameters {
+ let params = ProbabilisticScoringFeeParameters {
liquidity_penalty_multiplier_msat: 1_000,
considered_impossible_penalty_msat: u64::max_value(),
- ..ProbabilisticScoringParameters::zero_penalty()
+ ..ProbabilisticScoringFeeParameters::zero_penalty()
};
- let scorer = ProbabilisticScorer::new(params, &network_graph, &logger)
+ let decay_params = ProbabilisticScoringDecayParameters::zero_penalty();
+ let scorer = ProbabilisticScorer::new(decay_params, &network_graph, &logger)
.with_channel(42,
ChannelLiquidity {
min_liquidity_offset_msat: 40, max_liquidity_offset_msat: 40, last_updated,
inflight_htlc_msat: 0,
effective_capacity: EffectiveCapacity::Total { capacity_msat: 100, htlc_maximum_msat: 1_000 },
};
- assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 0);
+ assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, ¶ms), 0);
let usage = ChannelUsage { amount_msat: 50, ..usage };
- assert_ne!(scorer.channel_penalty_msat(42, &source, &target, usage), 0);
- assert_ne!(scorer.channel_penalty_msat(42, &source, &target, usage), u64::max_value());
+ assert_ne!(scorer.channel_penalty_msat(42, &source, &target, usage, ¶ms), 0);
+ assert_ne!(scorer.channel_penalty_msat(42, &source, &target, usage, ¶ms), u64::max_value());
let usage = ChannelUsage { amount_msat: 61, ..usage };
- assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), u64::max_value());
+ assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, ¶ms), u64::max_value());
}
#[test]
fn does_not_further_penalize_own_channel() {
let logger = TestLogger::new();
let network_graph = network_graph(&logger);
- let params = ProbabilisticScoringParameters {
+ let params = ProbabilisticScoringFeeParameters {
liquidity_penalty_multiplier_msat: 1_000,
- ..ProbabilisticScoringParameters::zero_penalty()
+ ..ProbabilisticScoringFeeParameters::zero_penalty()
};
- let mut scorer = ProbabilisticScorer::new(params, &network_graph, &logger);
+ let mut scorer = ProbabilisticScorer::new(ProbabilisticScoringDecayParameters::default(), &network_graph, &logger);
let sender = sender_node_id();
let source = source_node_id();
let usage = ChannelUsage {
let failed_path = payment_path_for_amount(500);
let successful_path = payment_path_for_amount(200);
- assert_eq!(scorer.channel_penalty_msat(41, &sender, &source, usage), 301);
+ assert_eq!(scorer.channel_penalty_msat(41, &sender, &source, usage, ¶ms), 301);
scorer.payment_path_failed(&failed_path, 41);
- assert_eq!(scorer.channel_penalty_msat(41, &sender, &source, usage), 301);
+ assert_eq!(scorer.channel_penalty_msat(41, &sender, &source, usage, ¶ms), 301);
scorer.payment_path_successful(&successful_path);
- assert_eq!(scorer.channel_penalty_msat(41, &sender, &source, usage), 301);
+ assert_eq!(scorer.channel_penalty_msat(41, &sender, &source, usage, ¶ms), 301);
}
#[test]
fn sets_liquidity_lower_bound_on_downstream_failure() {
let logger = TestLogger::new();
let network_graph = network_graph(&logger);
- let params = ProbabilisticScoringParameters {
+ let params = ProbabilisticScoringFeeParameters {
liquidity_penalty_multiplier_msat: 1_000,
- ..ProbabilisticScoringParameters::zero_penalty()
+ ..ProbabilisticScoringFeeParameters::zero_penalty()
};
- let mut scorer = ProbabilisticScorer::new(params, &network_graph, &logger);
+ let mut scorer = ProbabilisticScorer::new(ProbabilisticScoringDecayParameters::default(), &network_graph, &logger);
let source = source_node_id();
let target = target_node_id();
let path = payment_path_for_amount(500);
inflight_htlc_msat: 0,
effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_000, htlc_maximum_msat: 1_000 },
};
- assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 128);
+ assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, ¶ms), 128);
let usage = ChannelUsage { amount_msat: 500, ..usage };
- assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 301);
+ assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, ¶ms), 301);
let usage = ChannelUsage { amount_msat: 750, ..usage };
- assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 602);
+ assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, ¶ms), 602);
scorer.payment_path_failed(&path, 43);
let usage = ChannelUsage { amount_msat: 250, ..usage };
- assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 0);
+ assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, ¶ms), 0);
let usage = ChannelUsage { amount_msat: 500, ..usage };
- assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 0);
+ assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, ¶ms), 0);
let usage = ChannelUsage { amount_msat: 750, ..usage };
- assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 300);
+ assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, ¶ms), 300);
}
#[test]
fn sets_liquidity_upper_bound_on_failure() {
let logger = TestLogger::new();
let network_graph = network_graph(&logger);
- let params = ProbabilisticScoringParameters {
+ let params = ProbabilisticScoringFeeParameters {
liquidity_penalty_multiplier_msat: 1_000,
considered_impossible_penalty_msat: u64::max_value(),
- ..ProbabilisticScoringParameters::zero_penalty()
+ ..ProbabilisticScoringFeeParameters::zero_penalty()
};
- let mut scorer = ProbabilisticScorer::new(params, &network_graph, &logger);
+ let mut scorer = ProbabilisticScorer::new(ProbabilisticScoringDecayParameters::default(), &network_graph, &logger);
let source = source_node_id();
let target = target_node_id();
let path = payment_path_for_amount(500);
inflight_htlc_msat: 0,
effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_000, htlc_maximum_msat: 1_000 },
};
- assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 128);
+ assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, ¶ms), 128);
let usage = ChannelUsage { amount_msat: 500, ..usage };
- assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 301);
+ assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, ¶ms), 301);
let usage = ChannelUsage { amount_msat: 750, ..usage };
- assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 602);
+ assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, ¶ms), 602);
scorer.payment_path_failed(&path, 42);
let usage = ChannelUsage { amount_msat: 250, ..usage };
- assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 300);
+ assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, ¶ms), 300);
let usage = ChannelUsage { amount_msat: 500, ..usage };
- assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), u64::max_value());
+ assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, ¶ms), u64::max_value());
let usage = ChannelUsage { amount_msat: 750, ..usage };
- assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), u64::max_value());
+ assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, ¶ms), u64::max_value());
}
#[test]
let node_c = NodeId::from_pubkey(&pub_c);
let node_d = NodeId::from_pubkey(&pub_d);
- let params = ProbabilisticScoringParameters {
+ let params = ProbabilisticScoringFeeParameters {
liquidity_penalty_multiplier_msat: 1_000,
- ..ProbabilisticScoringParameters::zero_penalty()
+ ..ProbabilisticScoringFeeParameters::zero_penalty()
};
- let mut scorer = ProbabilisticScorer::new(params, &network_graph, &logger);
+ let mut scorer = ProbabilisticScorer::new(ProbabilisticScoringDecayParameters::default(), &network_graph, &logger);
let usage = ChannelUsage {
amount_msat: 250,
inflight_htlc_msat: 0,
effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_000, htlc_maximum_msat: 1_000 },
};
- assert_eq!(scorer.channel_penalty_msat(42, &node_a, &node_b, usage), 128);
+ assert_eq!(scorer.channel_penalty_msat(42, &node_a, &node_b, usage, ¶ms), 128);
// Note that a default liquidity bound is used for B -> C as no channel exists
- assert_eq!(scorer.channel_penalty_msat(43, &node_b, &node_c, usage), 128);
- assert_eq!(scorer.channel_penalty_msat(44, &node_c, &node_d, usage), 128);
+ assert_eq!(scorer.channel_penalty_msat(43, &node_b, &node_c, usage, ¶ms), 128);
+ assert_eq!(scorer.channel_penalty_msat(44, &node_c, &node_d, usage, ¶ms), 128);
scorer.payment_path_failed(&Path { hops: path, blinded_tail: None }, 43);
- assert_eq!(scorer.channel_penalty_msat(42, &node_a, &node_b, usage), 80);
+ assert_eq!(scorer.channel_penalty_msat(42, &node_a, &node_b, usage, ¶ms), 80);
// Note that a default liquidity bound is used for B -> C as no channel exists
- assert_eq!(scorer.channel_penalty_msat(43, &node_b, &node_c, usage), 128);
- assert_eq!(scorer.channel_penalty_msat(44, &node_c, &node_d, usage), 128);
+ assert_eq!(scorer.channel_penalty_msat(43, &node_b, &node_c, usage, ¶ms), 128);
+ assert_eq!(scorer.channel_penalty_msat(44, &node_c, &node_d, usage, ¶ms), 128);
}
#[test]
fn reduces_liquidity_upper_bound_along_path_on_success() {
let logger = TestLogger::new();
let network_graph = network_graph(&logger);
- let params = ProbabilisticScoringParameters {
+ let params = ProbabilisticScoringFeeParameters {
liquidity_penalty_multiplier_msat: 1_000,
- ..ProbabilisticScoringParameters::zero_penalty()
+ ..ProbabilisticScoringFeeParameters::zero_penalty()
};
- let mut scorer = ProbabilisticScorer::new(params, &network_graph, &logger);
+ let mut scorer = ProbabilisticScorer::new(ProbabilisticScoringDecayParameters::default(), &network_graph, &logger);
let sender = sender_node_id();
let source = source_node_id();
let target = target_node_id();
effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_000, htlc_maximum_msat: 1_000 },
};
- assert_eq!(scorer.channel_penalty_msat(41, &sender, &source, usage), 128);
- assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 128);
- assert_eq!(scorer.channel_penalty_msat(43, &target, &recipient, usage), 128);
+ assert_eq!(scorer.channel_penalty_msat(41, &sender, &source, usage, &params), 128);
+ assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 128);
+ assert_eq!(scorer.channel_penalty_msat(43, &target, &recipient, usage, &params), 128);
scorer.payment_path_successful(&payment_path_for_amount(500));
- assert_eq!(scorer.channel_penalty_msat(41, &sender, &source, usage), 128);
- assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 300);
- assert_eq!(scorer.channel_penalty_msat(43, &target, &recipient, usage), 300);
+ assert_eq!(scorer.channel_penalty_msat(41, &sender, &source, usage, &params), 128);
+ assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 300);
+ assert_eq!(scorer.channel_penalty_msat(43, &target, &recipient, usage, &params), 300);
}
#[test]
fn decays_liquidity_bounds_over_time() {
let logger = TestLogger::new();
let network_graph = network_graph(&logger);
- let params = ProbabilisticScoringParameters {
+ let params = ProbabilisticScoringFeeParameters {
liquidity_penalty_multiplier_msat: 1_000,
- liquidity_offset_half_life: Duration::from_secs(10),
considered_impossible_penalty_msat: u64::max_value(),
- ..ProbabilisticScoringParameters::zero_penalty()
+ ..ProbabilisticScoringFeeParameters::zero_penalty()
};
- let mut scorer = ProbabilisticScorer::new(params, &network_graph, &logger);
+ let decay_params = ProbabilisticScoringDecayParameters {
+ liquidity_offset_half_life: Duration::from_secs(10),
+ ..ProbabilisticScoringDecayParameters::zero_penalty()
+ };
+ let mut scorer = ProbabilisticScorer::new(decay_params, &network_graph, &logger);
let source = source_node_id();
let target = target_node_id();
inflight_htlc_msat: 0,
effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024, htlc_maximum_msat: 1_024 },
};
- assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 0);
+ assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 0);
let usage = ChannelUsage { amount_msat: 1_023, ..usage };
- assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 2_000);
+ assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 2_000);
scorer.payment_path_failed(&payment_path_for_amount(768), 42);
scorer.payment_path_failed(&payment_path_for_amount(128), 43);
let usage = ChannelUsage { amount_msat: 128, ..usage };
- assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 0);
+ assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 0);
let usage = ChannelUsage { amount_msat: 256, ..usage };
- assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 93);
+ assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 93);
let usage = ChannelUsage { amount_msat: 768, ..usage };
- assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 1_479);
+ assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 1_479);
let usage = ChannelUsage { amount_msat: 896, ..usage };
- assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), u64::max_value());
+ assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), u64::max_value());
SinceEpoch::advance(Duration::from_secs(9));
let usage = ChannelUsage { amount_msat: 128, ..usage };
- assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 0);
+ assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 0);
let usage = ChannelUsage { amount_msat: 256, ..usage };
- assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 93);
+ assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 93);
let usage = ChannelUsage { amount_msat: 768, ..usage };
- assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 1_479);
+ assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 1_479);
let usage = ChannelUsage { amount_msat: 896, ..usage };
- assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), u64::max_value());
+ assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), u64::max_value());
SinceEpoch::advance(Duration::from_secs(1));
let usage = ChannelUsage { amount_msat: 64, ..usage };
- assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 0);
+ assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 0);
let usage = ChannelUsage { amount_msat: 128, ..usage };
- assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 34);
+ assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 34);
let usage = ChannelUsage { amount_msat: 896, ..usage };
- assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 1_970);
+ assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 1_970);
let usage = ChannelUsage { amount_msat: 960, ..usage };
- assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), u64::max_value());
+ assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), u64::max_value());
// Fully decay liquidity lower bound.
SinceEpoch::advance(Duration::from_secs(10 * 7));
let usage = ChannelUsage { amount_msat: 0, ..usage };
- assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 0);
+ assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 0);
let usage = ChannelUsage { amount_msat: 1, ..usage };
- assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 0);
+ assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 0);
let usage = ChannelUsage { amount_msat: 1_023, ..usage };
- assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 2_000);
+ assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 2_000);
let usage = ChannelUsage { amount_msat: 1_024, ..usage };
- assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), u64::max_value());
+ assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), u64::max_value());
// Fully decay liquidity upper bound.
SinceEpoch::advance(Duration::from_secs(10));
let usage = ChannelUsage { amount_msat: 0, ..usage };
- assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 0);
+ assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 0);
let usage = ChannelUsage { amount_msat: 1_024, ..usage };
- assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), u64::max_value());
+ assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), u64::max_value());
SinceEpoch::advance(Duration::from_secs(10));
let usage = ChannelUsage { amount_msat: 0, ..usage };
- assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 0);
+ assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 0);
let usage = ChannelUsage { amount_msat: 1_024, ..usage };
- assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), u64::max_value());
+ assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), u64::max_value());
}
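The jumps in the assertions above follow the half-life rule: learned liquidity offsets are (roughly) halved once per elapsed `liquidity_offset_half_life`, so nothing moves at 9 seconds, the bounds halve at 10, and after many half-lives they relax back toward (0, capacity). A back-of-envelope sketch, not the scorer's exact integer arithmetic:

```rust
// Approximate decay of a learned liquidity offset after `elapsed_secs`, with the
// 10s half-life configured in the test above. The real implementation also guards
// against shifts of 64 bits or more, which the next test exercises.
fn decayed_offset(offset_msat: u64, elapsed_secs: u64, half_life_secs: u64) -> u64 {
	offset_msat >> (elapsed_secs / half_life_secs)
}
assert_eq!(decayed_offset(128, 9, 10), 128); // under one half-life: unchanged
assert_eq!(decayed_offset(128, 10, 10), 64); // one half-life: halved
```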
#[test]
fn decays_liquidity_bounds_without_shift_overflow() {
let logger = TestLogger::new();
let network_graph = network_graph(&logger);
- let params = ProbabilisticScoringParameters {
+ let params = ProbabilisticScoringFeeParameters {
liquidity_penalty_multiplier_msat: 1_000,
+ ..ProbabilisticScoringFeeParameters::zero_penalty()
+ };
+ let decay_params = ProbabilisticScoringDecayParameters {
liquidity_offset_half_life: Duration::from_secs(10),
- ..ProbabilisticScoringParameters::zero_penalty()
+ ..ProbabilisticScoringDecayParameters::default()
};
- let mut scorer = ProbabilisticScorer::new(params, &network_graph, &logger);
+ let mut scorer = ProbabilisticScorer::new(decay_params, &network_graph, &logger);
let source = source_node_id();
let target = target_node_id();
let usage = ChannelUsage {
inflight_htlc_msat: 0,
effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024, htlc_maximum_msat: 1_000 },
};
- assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 125);
+ assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 125);
scorer.payment_path_failed(&payment_path_for_amount(512), 42);
- assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 281);
+ assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 281);
// An unchecked right shift of 64 bits or more in DirectedChannelLiquidity::decayed_offset_msat
// would cause an overflow.
SinceEpoch::advance(Duration::from_secs(10 * 64));
- assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 125);
+ assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 125);
SinceEpoch::advance(Duration::from_secs(10));
- assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 125);
+ assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 125);
}
#[test]
fn restricts_liquidity_bounds_after_decay() {
let logger = TestLogger::new();
let network_graph = network_graph(&logger);
- let params = ProbabilisticScoringParameters {
+ let params = ProbabilisticScoringFeeParameters {
liquidity_penalty_multiplier_msat: 1_000,
+ ..ProbabilisticScoringFeeParameters::zero_penalty()
+ };
+ let decay_params = ProbabilisticScoringDecayParameters {
liquidity_offset_half_life: Duration::from_secs(10),
- ..ProbabilisticScoringParameters::zero_penalty()
+ ..ProbabilisticScoringDecayParameters::default()
};
- let mut scorer = ProbabilisticScorer::new(params, &network_graph, &logger);
+ let mut scorer = ProbabilisticScorer::new(decay_params, &network_graph, &logger);
let source = source_node_id();
let target = target_node_id();
let usage = ChannelUsage {
effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024, htlc_maximum_msat: 1_000 },
};
- assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 300);
+ assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 300);
// More knowledge gives higher confidence (256, 768), meaning a lower penalty.
scorer.payment_path_failed(&payment_path_for_amount(768), 42);
scorer.payment_path_failed(&payment_path_for_amount(256), 43);
- assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 281);
+ assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 281);
// Decaying knowledge gives less confidence (128, 896), meaning a higher penalty.
SinceEpoch::advance(Duration::from_secs(10));
- assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 291);
+ assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 291);
// Reducing the upper bound gives more confidence (128, 832) that the payment amount (512)
// is closer to the upper bound, meaning a higher penalty.
scorer.payment_path_successful(&payment_path_for_amount(64));
- assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 331);
+ assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 331);
// Increasing the lower bound gives more confidence (256, 832) that the payment amount (512)
// is closer to the lower bound, meaning a lower penalty.
scorer.payment_path_failed(&payment_path_for_amount(256), 43);
- assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 245);
+ assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 245);
// Further decaying affects the lower bound more than the upper bound (128, 928).
SinceEpoch::advance(Duration::from_secs(10));
- assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 280);
+ assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 280);
}
#[test]
fn restores_persisted_liquidity_bounds() {
let logger = TestLogger::new();
let network_graph = network_graph(&logger);
- let params = ProbabilisticScoringParameters {
+ let params = ProbabilisticScoringFeeParameters {
liquidity_penalty_multiplier_msat: 1_000,
- liquidity_offset_half_life: Duration::from_secs(10),
considered_impossible_penalty_msat: u64::max_value(),
- ..ProbabilisticScoringParameters::zero_penalty()
+ ..ProbabilisticScoringFeeParameters::zero_penalty()
};
- let mut scorer = ProbabilisticScorer::new(params.clone(), &network_graph, &logger);
+ let decay_params = ProbabilisticScoringDecayParameters {
+ liquidity_offset_half_life: Duration::from_secs(10),
+ ..ProbabilisticScoringDecayParameters::default()
+ };
+ let mut scorer = ProbabilisticScorer::new(decay_params, &network_graph, &logger);
let source = source_node_id();
let target = target_node_id();
let usage = ChannelUsage {
};
scorer.payment_path_failed(&payment_path_for_amount(500), 42);
- assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), u64::max_value());
+ assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), u64::max_value());
SinceEpoch::advance(Duration::from_secs(10));
- assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 473);
+ assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 473);
scorer.payment_path_failed(&payment_path_for_amount(250), 43);
- assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 300);
+ assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 300);
let mut serialized_scorer = Vec::new();
scorer.write(&mut serialized_scorer).unwrap();
let mut serialized_scorer = io::Cursor::new(&serialized_scorer);
let deserialized_scorer =
- <ProbabilisticScorer>::read(&mut serialized_scorer, (params, &network_graph, &logger)).unwrap();
- assert_eq!(deserialized_scorer.channel_penalty_msat(42, &source, &target, usage), 300);
+ <ProbabilisticScorer>::read(&mut serialized_scorer, (decay_params, &network_graph, &logger)).unwrap();
+ assert_eq!(deserialized_scorer.channel_penalty_msat(42, &source, &target, usage, &params), 300);
}
#[test]
fn decays_persisted_liquidity_bounds() {
let logger = TestLogger::new();
let network_graph = network_graph(&logger);
- let params = ProbabilisticScoringParameters {
+ let params = ProbabilisticScoringFeeParameters {
liquidity_penalty_multiplier_msat: 1_000,
- liquidity_offset_half_life: Duration::from_secs(10),
considered_impossible_penalty_msat: u64::max_value(),
- ..ProbabilisticScoringParameters::zero_penalty()
+ ..ProbabilisticScoringFeeParameters::zero_penalty()
};
- let mut scorer = ProbabilisticScorer::new(params.clone(), &network_graph, &logger);
+ let decay_params = ProbabilisticScoringDecayParameters {
+ liquidity_offset_half_life: Duration::from_secs(10),
+ ..ProbabilisticScoringDecayParameters::zero_penalty()
+ };
+ let mut scorer = ProbabilisticScorer::new(decay_params, &network_graph, &logger);
let source = source_node_id();
let target = target_node_id();
let usage = ChannelUsage {
};
scorer.payment_path_failed(&payment_path_for_amount(500), 42);
- assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), u64::max_value());
+ assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), u64::max_value());
let mut serialized_scorer = Vec::new();
scorer.write(&mut serialized_scorer).unwrap();
let mut serialized_scorer = io::Cursor::new(&serialized_scorer);
let deserialized_scorer =
- <ProbabilisticScorer>::read(&mut serialized_scorer, (params, &network_graph, &logger)).unwrap();
- assert_eq!(deserialized_scorer.channel_penalty_msat(42, &source, &target, usage), 473);
+ <ProbabilisticScorer>::read(&mut serialized_scorer, (decay_params, &network_graph, &logger)).unwrap();
+ assert_eq!(deserialized_scorer.channel_penalty_msat(42, &source, &target, usage, &params), 473);
scorer.payment_path_failed(&payment_path_for_amount(250), 43);
- assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 300);
+ assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 300);
SinceEpoch::advance(Duration::from_secs(10));
- assert_eq!(deserialized_scorer.channel_penalty_msat(42, &source, &target, usage), 365);
+ assert_eq!(deserialized_scorer.channel_penalty_msat(42, &source, &target, usage, &params), 365);
}
#[test]
// 50k sat reserve).
let logger = TestLogger::new();
let network_graph = network_graph(&logger);
- let params = ProbabilisticScoringParameters::default();
- let scorer = ProbabilisticScorer::new(params, &network_graph, &logger);
+ let params = ProbabilisticScoringFeeParameters::default();
+ let scorer = ProbabilisticScorer::new(ProbabilisticScoringDecayParameters::default(), &network_graph, &logger);
let source = source_node_id();
let target = target_node_id();
inflight_htlc_msat: 0,
effective_capacity: EffectiveCapacity::Total { capacity_msat: 950_000_000, htlc_maximum_msat: 1_000 },
};
- assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 4375);
+ assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 4375);
let usage = ChannelUsage {
effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_950_000_000, htlc_maximum_msat: 1_000 }, ..usage
};
- assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 2739);
+ assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 2739);
let usage = ChannelUsage {
effective_capacity: EffectiveCapacity::Total { capacity_msat: 2_950_000_000, htlc_maximum_msat: 1_000 }, ..usage
};
- assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 2236);
+ assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 2236);
let usage = ChannelUsage {
effective_capacity: EffectiveCapacity::Total { capacity_msat: 3_950_000_000, htlc_maximum_msat: 1_000 }, ..usage
};
- assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 1983);
+ assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 1983);
let usage = ChannelUsage {
effective_capacity: EffectiveCapacity::Total { capacity_msat: 4_950_000_000, htlc_maximum_msat: 1_000 }, ..usage
};
- assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 1637);
+ assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 1637);
let usage = ChannelUsage {
effective_capacity: EffectiveCapacity::Total { capacity_msat: 5_950_000_000, htlc_maximum_msat: 1_000 }, ..usage
};
- assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 1606);
+ assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 1606);
let usage = ChannelUsage {
effective_capacity: EffectiveCapacity::Total { capacity_msat: 6_950_000_000, htlc_maximum_msat: 1_000 }, ..usage
};
- assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 1331);
+ assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 1331);
let usage = ChannelUsage {
effective_capacity: EffectiveCapacity::Total { capacity_msat: 7_450_000_000, htlc_maximum_msat: 1_000 }, ..usage
};
- assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 1387);
+ assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 1387);
let usage = ChannelUsage {
effective_capacity: EffectiveCapacity::Total { capacity_msat: 7_950_000_000, htlc_maximum_msat: 1_000 }, ..usage
};
- assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 1379);
+ assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 1379);
let usage = ChannelUsage {
effective_capacity: EffectiveCapacity::Total { capacity_msat: 8_950_000_000, htlc_maximum_msat: 1_000 }, ..usage
};
- assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 1363);
+ assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 1363);
let usage = ChannelUsage {
effective_capacity: EffectiveCapacity::Total { capacity_msat: 9_950_000_000, htlc_maximum_msat: 1_000 }, ..usage
};
- assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 1355);
+ assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 1355);
}
#[test]
effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024, htlc_maximum_msat: 1_000 },
};
- let params = ProbabilisticScoringParameters {
+ let params = ProbabilisticScoringFeeParameters {
liquidity_penalty_multiplier_msat: 1_000,
- ..ProbabilisticScoringParameters::zero_penalty()
+ ..ProbabilisticScoringFeeParameters::zero_penalty()
};
- let scorer = ProbabilisticScorer::new(params, &network_graph, &logger);
- assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 58);
+ let scorer = ProbabilisticScorer::new(ProbabilisticScoringDecayParameters::default(), &network_graph, &logger);
+ assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 58);
- let params = ProbabilisticScoringParameters {
+ let params = ProbabilisticScoringFeeParameters {
base_penalty_msat: 500, liquidity_penalty_multiplier_msat: 1_000,
- anti_probing_penalty_msat: 0, ..ProbabilisticScoringParameters::zero_penalty()
+ anti_probing_penalty_msat: 0, ..ProbabilisticScoringFeeParameters::zero_penalty()
};
- let scorer = ProbabilisticScorer::new(params, &network_graph, &logger);
- assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 558);
+ let scorer = ProbabilisticScorer::new(ProbabilisticScoringDecayParameters::default(), &network_graph, &logger);
+ assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 558);
- let params = ProbabilisticScoringParameters {
+ let params = ProbabilisticScoringFeeParameters {
base_penalty_msat: 500, liquidity_penalty_multiplier_msat: 1_000,
base_penalty_amount_multiplier_msat: (1 << 30),
- anti_probing_penalty_msat: 0, ..ProbabilisticScoringParameters::zero_penalty()
+ anti_probing_penalty_msat: 0, ..ProbabilisticScoringFeeParameters::zero_penalty()
};
- let scorer = ProbabilisticScorer::new(params, &network_graph, &logger);
- assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 558 + 128);
+ let scorer = ProbabilisticScorer::new(ProbabilisticScoringDecayParameters::default(), &network_graph, &logger);
+ assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 558 + 128);
}
#[test]
effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024_000, htlc_maximum_msat: 1_000 },
};
- let params = ProbabilisticScoringParameters {
+ let params = ProbabilisticScoringFeeParameters {
liquidity_penalty_multiplier_msat: 1_000,
liquidity_penalty_amount_multiplier_msat: 0,
- ..ProbabilisticScoringParameters::zero_penalty()
+ ..ProbabilisticScoringFeeParameters::zero_penalty()
};
- let scorer = ProbabilisticScorer::new(params, &network_graph, &logger);
- assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 300);
+ let scorer = ProbabilisticScorer::new(ProbabilisticScoringDecayParameters::default(), &network_graph, &logger);
+ assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 300);
- let params = ProbabilisticScoringParameters {
+ let params = ProbabilisticScoringFeeParameters {
liquidity_penalty_multiplier_msat: 1_000,
liquidity_penalty_amount_multiplier_msat: 256,
- ..ProbabilisticScoringParameters::zero_penalty()
+ ..ProbabilisticScoringFeeParameters::zero_penalty()
};
- let scorer = ProbabilisticScorer::new(params, &network_graph, &logger);
- assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 337);
+ let scorer = ProbabilisticScorer::new(ProbabilisticScoringDecayParameters::default(), &network_graph, &logger);
+ assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 337);
}
#[test]
effective_capacity: EffectiveCapacity::Infinite,
};
- let params = ProbabilisticScoringParameters {
+ let params = ProbabilisticScoringFeeParameters {
liquidity_penalty_multiplier_msat: 40_000,
- ..ProbabilisticScoringParameters::zero_penalty()
+ ..ProbabilisticScoringFeeParameters::zero_penalty()
};
- let scorer = ProbabilisticScorer::new(params, &network_graph, &logger);
- assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 80_000);
+ let decay_params = ProbabilisticScoringDecayParameters::zero_penalty();
+ let scorer = ProbabilisticScorer::new(decay_params, &network_graph, &logger);
+ assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 80_000);
}
#[test]
fn accounts_for_inflight_htlc_usage() {
let logger = TestLogger::new();
let network_graph = network_graph(&logger);
- let params = ProbabilisticScoringParameters {
+ let params = ProbabilisticScoringFeeParameters {
considered_impossible_penalty_msat: u64::max_value(),
- ..ProbabilisticScoringParameters::zero_penalty()
+ ..ProbabilisticScoringFeeParameters::zero_penalty()
};
- let scorer = ProbabilisticScorer::new(params, &network_graph, &logger);
+ let scorer = ProbabilisticScorer::new(ProbabilisticScoringDecayParameters::default(), &network_graph, &logger);
let source = source_node_id();
let target = target_node_id();
inflight_htlc_msat: 0,
effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_000, htlc_maximum_msat: 1_000 },
};
- assert_ne!(scorer.channel_penalty_msat(42, &source, &target, usage), u64::max_value());
+ assert_ne!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), u64::max_value());
let usage = ChannelUsage { inflight_htlc_msat: 251, ..usage };
- assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), u64::max_value());
+ assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), u64::max_value());
}
#[test]
fn removes_uncertainity_when_exact_liquidity_known() {
let logger = TestLogger::new();
let network_graph = network_graph(&logger);
- let params = ProbabilisticScoringParameters::default();
- let scorer = ProbabilisticScorer::new(params.clone(), &network_graph, &logger);
+ let params = ProbabilisticScoringFeeParameters::default();
+ let scorer = ProbabilisticScorer::new(ProbabilisticScoringDecayParameters::default(), &network_graph, &logger);
let source = source_node_id();
let target = target_node_id();
inflight_htlc_msat: 0,
effective_capacity: EffectiveCapacity::ExactLiquidity { liquidity_msat: 1_000 },
};
- assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), base_penalty_msat);
+ assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), base_penalty_msat);
let usage = ChannelUsage { amount_msat: 1_000, ..usage };
- assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), base_penalty_msat);
+ assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), base_penalty_msat);
let usage = ChannelUsage { amount_msat: 1_001, ..usage };
- assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), u64::max_value());
+ assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), u64::max_value());
}
#[test]
fn remembers_historical_failures() {
let logger = TestLogger::new();
let network_graph = network_graph(&logger);
- let params = ProbabilisticScoringParameters {
- liquidity_offset_half_life: Duration::from_secs(60 * 60),
+ let params = ProbabilisticScoringFeeParameters {
historical_liquidity_penalty_multiplier_msat: 1024,
historical_liquidity_penalty_amount_multiplier_msat: 1024,
+ ..ProbabilisticScoringFeeParameters::zero_penalty()
+ };
+ let decay_params = ProbabilisticScoringDecayParameters {
+ liquidity_offset_half_life: Duration::from_secs(60 * 60),
historical_no_updates_half_life: Duration::from_secs(10),
- ..ProbabilisticScoringParameters::zero_penalty()
};
- let mut scorer = ProbabilisticScorer::new(params, &network_graph, &logger);
+ let mut scorer = ProbabilisticScorer::new(decay_params, &network_graph, &logger);
let source = source_node_id();
let target = target_node_id();
effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024, htlc_maximum_msat: 1_024 },
};
// With no historical data the normal liquidity penalty calculation is used.
- assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 47);
+ assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 47);
assert_eq!(scorer.historical_estimated_channel_liquidity_probabilities(42, &target),
None);
scorer.payment_path_failed(&payment_path_for_amount(1), 42);
- assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 2048);
+ assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 2048);
// The "it failed" increment is 32, where the probability should lie fully in the first
// octile.
assert_eq!(scorer.historical_estimated_channel_liquidity_probabilities(42, &target),
// Even after we tell the scorer we definitely have enough available liquidity, it will
// still remember that there was some failure in the past, and assign a non-0 penalty.
scorer.payment_path_failed(&payment_path_for_amount(1000), 43);
- assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 198);
+ assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 198);
// The first octile should be decayed just slightly and the last octile has a new point.
assert_eq!(scorer.historical_estimated_channel_liquidity_probabilities(42, &target),
Some(([31, 0, 0, 0, 0, 0, 0, 32], [31, 0, 0, 0, 0, 0, 0, 32])));
// Advance the time forward 16 half-lives (which the docs claim will ensure all data is
// gone), and check that we're back to where we started.
SinceEpoch::advance(Duration::from_secs(10 * 16));
- assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 47);
+ assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 47);
// Once fully decayed we still have data, but it's all-0s. In the future we may remove the
// data entirely instead.
assert_eq!(scorer.historical_estimated_channel_liquidity_probabilities(42, &target),
effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024, htlc_maximum_msat: 1_024 },
};
scorer.payment_path_failed(&payment_path_for_amount(1), 42);
- assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 409);
+ assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 409);
let usage = ChannelUsage {
amount_msat: 1,
inflight_htlc_msat: 0,
effective_capacity: EffectiveCapacity::MaximumHTLC { amount_msat: 0 },
};
- assert_eq!(scorer.channel_penalty_msat(42, &target, &source, usage), 2048);
+ assert_eq!(scorer.channel_penalty_msat(42, &target, &source, usage, &params), 2048);
// Advance to decay all liquidity offsets to zero.
SinceEpoch::advance(Duration::from_secs(60 * 60 * 10));
let network_graph = network_graph(&logger);
let source = source_node_id();
let target = target_node_id();
- let params = ProbabilisticScoringParameters {
+ let params = ProbabilisticScoringFeeParameters {
anti_probing_penalty_msat: 500,
- ..ProbabilisticScoringParameters::zero_penalty()
+ ..ProbabilisticScoringFeeParameters::zero_penalty()
};
- let scorer = ProbabilisticScorer::new(params, &network_graph, &logger);
+ let scorer = ProbabilisticScorer::new(ProbabilisticScoringDecayParameters::default(), &network_graph, &logger);
// Check we receive no penalty for a low htlc_maximum_msat.
let usage = ChannelUsage {
inflight_htlc_msat: 0,
effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024_000, htlc_maximum_msat: 1_000 },
};
- assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 0);
+ assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 0);
// Check we receive anti-probing penalty for htlc_maximum_msat == channel_capacity.
let usage = ChannelUsage {
inflight_htlc_msat: 0,
effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024_000, htlc_maximum_msat: 1_024_000 },
};
- assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 500);
+ assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 500);
// Check we receive anti-probing penalty for htlc_maximum_msat == channel_capacity/2.
let usage = ChannelUsage {
inflight_htlc_msat: 0,
effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024_000, htlc_maximum_msat: 512_000 },
};
- assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 500);
+ assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 500);
// Check we receive no anti-probing penalty for htlc_maximum_msat == channel_capacity/2 - 1.
let usage = ChannelUsage {
inflight_htlc_msat: 0,
effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024_000, htlc_maximum_msat: 511_999 },
};
- assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 0);
+ assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 0);
}
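In other words, the four cases above exercise a single decision rule: the anti-probing penalty fires whenever the advertised `htlc_maximum_msat` reveals too much of the channel's capacity. A one-line restatement (sketch, using the test's configured 500 msat penalty):

```rust
// Penalize when htlc_maximum_msat >= capacity_msat / 2, otherwise not at all.
let probing_penalty = if htlc_maximum_msat >= capacity_msat / 2 { 500 } else { 0 };
```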
#[test]
// Make sure we'll account for a blinded path's final_value_msat in scoring
let logger = TestLogger::new();
let network_graph = network_graph(&logger);
- let params = ProbabilisticScoringParameters {
+ let params = ProbabilisticScoringFeeParameters {
liquidity_penalty_multiplier_msat: 1_000,
- liquidity_offset_half_life: Duration::from_secs(10),
- ..ProbabilisticScoringParameters::zero_penalty()
+ ..ProbabilisticScoringFeeParameters::zero_penalty()
};
- let mut scorer = ProbabilisticScorer::new(params, &network_graph, &logger);
+ let decay_params = ProbabilisticScoringDecayParameters::default();
+ let mut scorer = ProbabilisticScorer::new(decay_params, &network_graph, &logger);
let source = source_node_id();
let target = target_node_id();
let usage = ChannelUsage {
inflight_htlc_msat: 0,
effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024, htlc_maximum_msat: 1_000 },
};
- assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 300);
+ assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 300);
let mut path = payment_path_for_amount(768);
let recipient_hop = path.hops.pop().unwrap();
scorer.payment_path_failed(&path, 43);
let liquidity = scorer.channel_liquidities.get(&42).unwrap()
- .as_directed(&source, &target, 0, 1_000, &scorer.params);
+ .as_directed(&source, &target, 0, 1_000, decay_params);
assert_eq!(liquidity.min_liquidity_msat(), 256);
assert_eq!(liquidity.max_liquidity_msat(), 768);
}
use bitcoin::blockdata::script::{Script, Builder};
use bitcoin::blockdata::opcodes;
use bitcoin::network::constants::Network;
+use bitcoin::psbt::PartiallySignedTransaction;
use bitcoin::util::bip32::{ExtendedPrivKey, ExtendedPubKey, ChildNumber};
use bitcoin::util::sighash;
(2, StaticPaymentOutput),
);
+impl SpendableOutputDescriptor {
+ /// Turns this into a [`bitcoin::psbt::Input`] which can be used to create a
+ /// [`PartiallySignedTransaction`] which spends the given descriptor.
+ ///
+ /// Note that this does not include any signatures, just the information required to
+ /// construct the transaction and sign it.
+ pub fn to_psbt_input(&self) -> bitcoin::psbt::Input {
+ match self {
+ SpendableOutputDescriptor::StaticOutput { output, .. } => {
+ // Is a standard P2WPKH, no need for witness script
+ bitcoin::psbt::Input {
+ witness_utxo: Some(output.clone()),
+ ..Default::default()
+ }
+ },
+ SpendableOutputDescriptor::DelayedPaymentOutput(descriptor) => {
+ // TODO we could add the witness script as well
+ bitcoin::psbt::Input {
+ witness_utxo: Some(descriptor.output.clone()),
+ ..Default::default()
+ }
+ },
+ SpendableOutputDescriptor::StaticPaymentOutput(descriptor) => {
+ // TODO we could add the witness script as well
+ bitcoin::psbt::Input {
+ witness_utxo: Some(descriptor.output.clone()),
+ ..Default::default()
+ }
+ },
+ }
+ }
+
+ /// Creates an unsigned [`PartiallySignedTransaction`] which spends the given descriptors to
+ /// the given outputs, plus an output to the given change destination (if sufficient
+ /// change value remains). The PSBT will have a feerate, at least, of the given value.
+ ///
+ /// The `locktime` argument is used to set the transaction's locktime. If `None`, the
+ /// transaction will have a locktime of 0. It is recommended to set this to the current block
+ /// height to avoid fee sniping, unless you have some specific reason to use a different
+ /// locktime.
+ ///
+ /// Returns the PSBT and expected max transaction weight.
+ ///
+ /// Returns `Err(())` if the output value is greater than the input value minus required fee,
+ /// if a descriptor was duplicated, or if an output descriptor `script_pubkey`
+ /// does not match the one we can spend.
+ ///
+ /// We do not enforce that outputs meet the dust limit or that any output scripts are standard.
+ pub fn create_spendable_outputs_psbt(descriptors: &[&SpendableOutputDescriptor], outputs: Vec<TxOut>, change_destination_script: Script, feerate_sat_per_1000_weight: u32, locktime: Option<PackedLockTime>) -> Result<(PartiallySignedTransaction, usize), ()> {
+ let mut input = Vec::with_capacity(descriptors.len());
+ let mut input_value = 0;
+ let mut witness_weight = 0;
+ let mut output_set = HashSet::with_capacity(descriptors.len());
+ for outp in descriptors {
+ match outp {
+ SpendableOutputDescriptor::StaticPaymentOutput(descriptor) => {
+ if !output_set.insert(descriptor.outpoint) { return Err(()); }
+ input.push(TxIn {
+ previous_output: descriptor.outpoint.into_bitcoin_outpoint(),
+ script_sig: Script::new(),
+ sequence: Sequence::ZERO,
+ witness: Witness::new(),
+ });
+ witness_weight += StaticPaymentOutputDescriptor::MAX_WITNESS_LENGTH;
+ #[cfg(feature = "grind_signatures")]
+ { witness_weight -= 1; } // Guarantees a low R signature
+ input_value += descriptor.output.value;
+ },
+ SpendableOutputDescriptor::DelayedPaymentOutput(descriptor) => {
+ if !output_set.insert(descriptor.outpoint) { return Err(()); }
+ input.push(TxIn {
+ previous_output: descriptor.outpoint.into_bitcoin_outpoint(),
+ script_sig: Script::new(),
+ sequence: Sequence(descriptor.to_self_delay as u32),
+ witness: Witness::new(),
+ });
+ witness_weight += DelayedPaymentOutputDescriptor::MAX_WITNESS_LENGTH;
+ #[cfg(feature = "grind_signatures")]
+ { witness_weight -= 1; } // Guarantees a low R signature
+ input_value += descriptor.output.value;
+ },
+ SpendableOutputDescriptor::StaticOutput { ref outpoint, ref output } => {
+ if !output_set.insert(*outpoint) { return Err(()); }
+ input.push(TxIn {
+ previous_output: outpoint.into_bitcoin_outpoint(),
+ script_sig: Script::new(),
+ sequence: Sequence::ZERO,
+ witness: Witness::new(),
+ });
+ witness_weight += 1 + 73 + 34;
+ #[cfg(feature = "grind_signatures")]
+ { witness_weight -= 1; } // Guarantees a low R signature
+ input_value += output.value;
+ }
+ }
+ if input_value > MAX_VALUE_MSAT / 1000 { return Err(()); }
+ }
+ let mut tx = Transaction {
+ version: 2,
+ lock_time: locktime.unwrap_or(PackedLockTime::ZERO),
+ input,
+ output: outputs,
+ };
+ let expected_max_weight =
+ transaction_utils::maybe_add_change_output(&mut tx, input_value, witness_weight, feerate_sat_per_1000_weight, change_destination_script)?;
+
+ let psbt_inputs = descriptors.iter().map(|d| d.to_psbt_input()).collect::<Vec<_>>();
+ let psbt = PartiallySignedTransaction {
+ inputs: psbt_inputs,
+ outputs: vec![Default::default(); tx.output.len()],
+ unsigned_tx: tx,
+ xpub: Default::default(),
+ version: 0,
+ proprietary: Default::default(),
+ unknown: Default::default(),
+ };
+ Ok((psbt, expected_max_weight))
+ }
+}
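As a caller-side sketch (the bindings `descriptors`, `dest_script` and `change_script` are hypothetical, not part of this diff), the creation half of the new flow produces an unsigned PSBT plus a weight bound:

```rust
use bitcoin::TxOut;

// Build an unsigned PSBT spending our descriptors, sending any change to
// `change_script` with a feerate of at least 253 sat per 1000 weight units.
let outputs = vec![TxOut { value: 10_000, script_pubkey: dest_script }];
let (psbt, max_weight) = SpendableOutputDescriptor::create_spendable_outputs_psbt(
	&descriptors, outputs, change_script, 253, None)?;
// Each PSBT input carries its witness_utxo but no signatures yet; `max_weight`
// upper-bounds the final transaction weight for fee estimation.
```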
+
/// A trait to handle Lightning channel key material without concretizing the channel type or
/// the signature mechanism.
pub trait ChannelSigner {
)
}
- /// Creates a [`Transaction`] which spends the given descriptors to the given outputs, plus an
- /// output to the given change destination (if sufficient change value remains). The
- /// transaction will have a feerate, at least, of the given value.
+ /// Signs the given [`PartiallySignedTransaction`] which spends the given [`SpendableOutputDescriptor`]s.
+ /// The resulting inputs will be finalized and the PSBT will be ready for broadcast if there
+ /// are no other inputs that need signing.
///
- /// Returns `Err(())` if the output value is greater than the input value minus required fee,
- /// if a descriptor was duplicated, or if an output descriptor `script_pubkey`
- /// does not match the one we can spend.
- ///
- /// We do not enforce that outputs meet the dust limit or that any output scripts are standard.
+ /// Returns `Err(())` if the PSBT is missing a descriptor or if we fail to sign.
///
/// May panic if the [`SpendableOutputDescriptor`]s were not generated by channels which used
/// this [`KeysManager`] or one of the [`InMemorySigner`] created by this [`KeysManager`].
- pub fn spend_spendable_outputs<C: Signing>(&self, descriptors: &[&SpendableOutputDescriptor], outputs: Vec<TxOut>, change_destination_script: Script, feerate_sat_per_1000_weight: u32, secp_ctx: &Secp256k1<C>) -> Result<Transaction, ()> {
- let mut input = Vec::new();
- let mut input_value = 0;
- let mut witness_weight = 0;
- let mut output_set = HashSet::with_capacity(descriptors.len());
- for outp in descriptors {
- match outp {
- SpendableOutputDescriptor::StaticPaymentOutput(descriptor) => {
- input.push(TxIn {
- previous_output: descriptor.outpoint.into_bitcoin_outpoint(),
- script_sig: Script::new(),
- sequence: Sequence::ZERO,
- witness: Witness::new(),
- });
- witness_weight += StaticPaymentOutputDescriptor::MAX_WITNESS_LENGTH;
- #[cfg(feature = "grind_signatures")]
- { witness_weight -= 1; } // Guarantees a low R signature
- input_value += descriptor.output.value;
- if !output_set.insert(descriptor.outpoint) { return Err(()); }
- },
- SpendableOutputDescriptor::DelayedPaymentOutput(descriptor) => {
- input.push(TxIn {
- previous_output: descriptor.outpoint.into_bitcoin_outpoint(),
- script_sig: Script::new(),
- sequence: Sequence(descriptor.to_self_delay as u32),
- witness: Witness::new(),
- });
- witness_weight += DelayedPaymentOutputDescriptor::MAX_WITNESS_LENGTH;
- #[cfg(feature = "grind_signatures")]
- { witness_weight -= 1; } // Guarantees a low R signature
- input_value += descriptor.output.value;
- if !output_set.insert(descriptor.outpoint) { return Err(()); }
- },
- SpendableOutputDescriptor::StaticOutput { ref outpoint, ref output } => {
- input.push(TxIn {
- previous_output: outpoint.into_bitcoin_outpoint(),
- script_sig: Script::new(),
- sequence: Sequence::ZERO,
- witness: Witness::new(),
- });
- witness_weight += 1 + 73 + 34;
- #[cfg(feature = "grind_signatures")]
- { witness_weight -= 1; } // Guarantees a low R signature
- input_value += output.value;
- if !output_set.insert(*outpoint) { return Err(()); }
- }
- }
- if input_value > MAX_VALUE_MSAT / 1000 { return Err(()); }
- }
- let mut spend_tx = Transaction {
- version: 2,
- lock_time: PackedLockTime(0),
- input,
- output: outputs,
- };
- let expected_max_weight =
- transaction_utils::maybe_add_change_output(&mut spend_tx, input_value, witness_weight, feerate_sat_per_1000_weight, change_destination_script)?;
-
+ pub fn sign_spendable_outputs_psbt<C: Signing>(&self, descriptors: &[&SpendableOutputDescriptor], psbt: &mut PartiallySignedTransaction, secp_ctx: &Secp256k1<C>) -> Result<(), ()> {
let mut keys_cache: Option<(InMemorySigner, [u8; 32])> = None;
- let mut input_idx = 0;
for outp in descriptors {
match outp {
SpendableOutputDescriptor::StaticPaymentOutput(descriptor) => {
+ let input_idx = psbt.unsigned_tx.input.iter().position(|i| i.previous_output == descriptor.outpoint.into_bitcoin_outpoint()).ok_or(())?;
if keys_cache.is_none() || keys_cache.as_ref().unwrap().1 != descriptor.channel_keys_id {
keys_cache = Some((
self.derive_channel_keys(descriptor.channel_value_satoshis, &descriptor.channel_keys_id),
descriptor.channel_keys_id));
}
- spend_tx.input[input_idx].witness = Witness::from_vec(keys_cache.as_ref().unwrap().0.sign_counterparty_payment_input(&spend_tx, input_idx, &descriptor, &secp_ctx)?);
+ let witness = Witness::from_vec(keys_cache.as_ref().unwrap().0.sign_counterparty_payment_input(&psbt.unsigned_tx, input_idx, &descriptor, &secp_ctx)?);
+ psbt.inputs[input_idx].final_script_witness = Some(witness);
},
SpendableOutputDescriptor::DelayedPaymentOutput(descriptor) => {
+ let input_idx = psbt.unsigned_tx.input.iter().position(|i| i.previous_output == descriptor.outpoint.into_bitcoin_outpoint()).ok_or(())?;
if keys_cache.is_none() || keys_cache.as_ref().unwrap().1 != descriptor.channel_keys_id {
keys_cache = Some((
self.derive_channel_keys(descriptor.channel_value_satoshis, &descriptor.channel_keys_id),
descriptor.channel_keys_id));
}
- spend_tx.input[input_idx].witness = Witness::from_vec(keys_cache.as_ref().unwrap().0.sign_dynamic_p2wsh_input(&spend_tx, input_idx, &descriptor, &secp_ctx)?);
+ let witness = Witness::from_vec(keys_cache.as_ref().unwrap().0.sign_dynamic_p2wsh_input(&psbt.unsigned_tx, input_idx, &descriptor, &secp_ctx)?);
+ psbt.inputs[input_idx].final_script_witness = Some(witness);
},
- SpendableOutputDescriptor::StaticOutput { ref output, .. } => {
+ SpendableOutputDescriptor::StaticOutput { ref outpoint, ref output } => {
+ let input_idx = psbt.unsigned_tx.input.iter().position(|i| i.previous_output == outpoint.into_bitcoin_outpoint()).ok_or(())?;
let derivation_idx = if output.script_pubkey == self.destination_script {
1
} else {
if payment_script != output.script_pubkey { return Err(()); };
- let sighash = hash_to_message!(&sighash::SighashCache::new(&spend_tx).segwit_signature_hash(input_idx, &witness_script, output.value, EcdsaSighashType::All).unwrap()[..]);
+ let sighash = hash_to_message!(&sighash::SighashCache::new(&psbt.unsigned_tx).segwit_signature_hash(input_idx, &witness_script, output.value, EcdsaSighashType::All).unwrap()[..]);
let sig = sign_with_aux_rand(secp_ctx, &sighash, &secret.private_key, &self);
let mut sig_ser = sig.serialize_der().to_vec();
sig_ser.push(EcdsaSighashType::All as u8);
- spend_tx.input[input_idx].witness.push(sig_ser);
- spend_tx.input[input_idx].witness.push(pubkey.inner.serialize().to_vec());
+ let witness = Witness::from_vec(vec![sig_ser, pubkey.inner.serialize().to_vec()]);
+ psbt.inputs[input_idx].final_script_witness = Some(witness);
},
}
- input_idx += 1;
}
+ Ok(())
+ }
+
+ /// Creates a [`Transaction`] which spends the given descriptors to the given outputs, plus an
+ /// output to the given change destination (if sufficient change value remains). The
+ /// transaction will have a feerate, at least, of the given value.
+ ///
+ /// The `locktime` argument is used to set the transaction's locktime. If `None`, the
+ /// transaction will have a locktime of 0. It is recommended to set this to the current block
+ /// height to avoid fee sniping, unless you have some specific reason to use a different
+ /// locktime.
+ ///
+ /// Returns `Err(())` if the output value is greater than the input value minus required fee,
+ /// if a descriptor was duplicated, or if an output descriptor `script_pubkey`
+ /// does not match the one we can spend.
+ ///
+ /// We do not enforce that outputs meet the dust limit or that any output scripts are standard.
+ ///
+ /// May panic if the [`SpendableOutputDescriptor`]s were not generated by channels which used
+ /// this [`KeysManager`] or one of the [`InMemorySigner`] created by this [`KeysManager`].
+ pub fn spend_spendable_outputs<C: Signing>(&self, descriptors: &[&SpendableOutputDescriptor], outputs: Vec<TxOut>, change_destination_script: Script, feerate_sat_per_1000_weight: u32, locktime: Option<PackedLockTime>, secp_ctx: &Secp256k1<C>) -> Result<Transaction, ()> {
+ let (mut psbt, expected_max_weight) = SpendableOutputDescriptor::create_spendable_outputs_psbt(descriptors, outputs, change_destination_script, feerate_sat_per_1000_weight, locktime)?;
+ self.sign_spendable_outputs_psbt(descriptors, &mut psbt, secp_ctx)?;
+
+ let spend_tx = psbt.extract_tx();
+
debug_assert!(expected_max_weight >= spend_tx.weight());
// Note that witnesses with a signature vary somewhat in size, so allow
// `expected_max_weight` to overshoot by up to 3 bytes per input.
}
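Putting the two halves together through `spend_spendable_outputs`, a caller-side sketch (all bindings hypothetical) that sets the new `locktime` argument to the current height, as the docs above recommend:

```rust
use bitcoin::PackedLockTime;

// Setting the locktime to the current best height discourages fee sniping.
let spend_tx = keys_manager.spend_spendable_outputs(
	&descriptors, outputs, change_script, feerate_sat_per_1000_weight,
	Some(PackedLockTime(best_block_height)), &secp_ctx)?;
// Alternatively, sign an externally assembled PSBT in place rather than
// extracting a Transaction:
// keys_manager.sign_spendable_outputs_psbt(&descriptors, &mut psbt, &secp_ctx)?;
```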
/// See [`KeysManager::spend_spendable_outputs`] for documentation on this method.
- pub fn spend_spendable_outputs<C: Signing>(&self, descriptors: &[&SpendableOutputDescriptor], outputs: Vec<TxOut>, change_destination_script: Script, feerate_sat_per_1000_weight: u32, secp_ctx: &Secp256k1<C>) -> Result<Transaction, ()> {
- self.inner.spend_spendable_outputs(descriptors, outputs, change_destination_script, feerate_sat_per_1000_weight, secp_ctx)
+ pub fn spend_spendable_outputs<C: Signing>(&self, descriptors: &[&SpendableOutputDescriptor], outputs: Vec<TxOut>, change_destination_script: Script, feerate_sat_per_1000_weight: u32, locktime: Option<PackedLockTime>, secp_ctx: &Secp256k1<C>) -> Result<Transaction, ()> {
+ self.inner.spend_spendable_outputs(descriptors, outputs, change_destination_script, feerate_sat_per_1000_weight, locktime, secp_ctx)
}
/// See [`KeysManager::derive_channel_keys`] for documentation on this method.
let _signer: Box<dyn EcdsaChannelSigner>;
}
-#[cfg(all(test, feature = "_bench_unstable", not(feature = "no-std")))]
-mod benches {
+#[cfg(ldk_bench)]
+pub mod benches {
use std::sync::{Arc, mpsc};
use std::sync::mpsc::TryRecvError;
use std::thread;
use bitcoin::Network;
use crate::sign::{EntropySource, KeysManager};
- use test::Bencher;
+ use criterion::Criterion;
- #[bench]
- fn bench_get_secure_random_bytes(bench: &mut Bencher) {
+ pub fn bench_get_secure_random_bytes(bench: &mut Criterion) {
let seed = [0u8; 32];
let now = Duration::from_secs(genesis_block(Network::Testnet).header.time as u64);
let keys_manager = Arc::new(KeysManager::new(&seed, now.as_secs(), now.subsec_micros()));
stops.push(stop_sender);
}
- bench.iter(|| {
- for _ in 1..100 {
- keys_manager.get_secure_random_bytes();
- }
- });
+ bench.bench_function("get_secure_random_bytes", |b| b.iter(||
+ keys_manager.get_secure_random_bytes()));
for stop in stops {
let _ = stop.send(());
handle.join().unwrap();
}
}
-
}
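The same mechanical translation applies to the other benchmarks migrated in this change: an old `#[bench]` function taking `&mut Bencher` becomes a public function taking `&mut Criterion`, gated on `ldk_bench`. In outline (module layout as above; function and workload names hypothetical):

```rust
#[cfg(ldk_bench)]
pub mod benches {
	use criterion::Criterion;

	pub fn bench_example(bench: &mut Criterion) {
		// Criterion manages its own iteration counts, so the old hand-rolled
		// `for _ in 1..100` loop around the workload is no longer needed.
		bench.bench_function("example", |b| b.iter(|| do_work()));
	}
}
```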
pub(crate) enum LockHeldState {
HeldByThread,
NotHeldByThread,
- #[cfg(any(feature = "_bench_unstable", not(test)))]
+ #[cfg(any(ldk_bench, not(test)))]
Unsupported,
}
fn unsafe_well_ordered_double_lock_self(&'a self) -> Self::ExclLock;
}
-#[cfg(all(feature = "std", not(feature = "_bench_unstable"), test))]
+#[cfg(all(feature = "std", not(ldk_bench), test))]
mod debug_sync;
-#[cfg(all(feature = "std", not(feature = "_bench_unstable"), test))]
+#[cfg(all(feature = "std", not(ldk_bench), test))]
pub use debug_sync::*;
-#[cfg(all(feature = "std", not(feature = "_bench_unstable"), test))]
+#[cfg(all(feature = "std", not(ldk_bench), test))]
// Note that to make debug_sync's regex work this must not contain `debug_string` in the module name
mod test_lockorder_checks;
-#[cfg(all(feature = "std", any(feature = "_bench_unstable", not(test))))]
+#[cfg(all(feature = "std", any(ldk_bench, not(test))))]
pub(crate) mod fairrwlock;
-#[cfg(all(feature = "std", any(feature = "_bench_unstable", not(test))))]
+#[cfg(all(feature = "std", any(ldk_bench, not(test))))]
pub use {std::sync::{Arc, Mutex, Condvar, MutexGuard, RwLock, RwLockReadGuard, RwLockWriteGuard}, fairrwlock::FairRwLock};
-#[cfg(all(feature = "std", any(feature = "_bench_unstable", not(test))))]
+#[cfg(all(feature = "std", any(ldk_bench, not(test))))]
mod ext_impl {
use super::*;
impl<'a, T: 'a> LockTestExt<'a> for Mutex<T> {
// Since the path is reversed, the last element in our iteration is the first
// hop.
if idx == path.hops.len() - 1 {
- scorer.channel_penalty_msat(hop.short_channel_id, &NodeId::from_pubkey(payer), &NodeId::from_pubkey(&hop.pubkey), usage);
+ scorer.channel_penalty_msat(hop.short_channel_id, &NodeId::from_pubkey(payer), &NodeId::from_pubkey(&hop.pubkey), usage, &());
} else {
let curr_hop_path_idx = path.hops.len() - 1 - idx;
- scorer.channel_penalty_msat(hop.short_channel_id, &NodeId::from_pubkey(&path.hops[curr_hop_path_idx - 1].pubkey), &NodeId::from_pubkey(&hop.pubkey), usage);
+ scorer.channel_penalty_msat(hop.short_channel_id, &NodeId::from_pubkey(&path.hops[curr_hop_path_idx - 1].pubkey), &NodeId::from_pubkey(&hop.pubkey), usage, &());
}
}
}
let scorer = self.scorer.lock().unwrap();
find_route(
payer, params, &self.network_graph, first_hops, &logger,
- &ScorerAccountingForInFlightHtlcs::new(scorer, &inflight_htlcs),
+ &ScorerAccountingForInFlightHtlcs::new(scorer, &inflight_htlcs), &(),
&[42; 32]
)
}
}
impl chaininterface::BroadcasterInterface for TestBroadcaster {
- fn broadcast_transaction(&self, tx: &Transaction) {
- let lock_time = tx.lock_time.0;
- assert!(lock_time < 1_500_000_000);
- if bitcoin::LockTime::from(tx.lock_time).is_block_height() && lock_time > self.blocks.lock().unwrap().last().unwrap().1 {
- for inp in tx.input.iter() {
- if inp.sequence != Sequence::MAX {
- panic!("We should never broadcast a transaction before its locktime ({})!", tx.lock_time);
+ fn broadcast_transactions(&self, txs: &[&Transaction]) {
+ for tx in txs {
+ let lock_time = tx.lock_time.0;
+ assert!(lock_time < 1_500_000_000);
+ if bitcoin::LockTime::from(tx.lock_time).is_block_height() && lock_time > self.blocks.lock().unwrap().last().unwrap().1 {
+ for inp in tx.input.iter() {
+ if inp.sequence != Sequence::MAX {
+ panic!("We should never broadcast a transaction before its locktime ({})!", tx.lock_time);
+ }
}
}
}
- self.txn_broadcasted.lock().unwrap().push(tx.clone());
+ let owned_txs: Vec<Transaction> = txs.iter().map(|tx| (*tx).clone()).collect();
+ self.txn_broadcasted.lock().unwrap().extend(owned_txs);
}
}
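For downstream implementors, the trait change means providing the batch method in place of the old single-transaction one. A minimal sketch, where `MyBroadcaster` and its `submit` helper are hypothetical:

```rust
use bitcoin::Transaction;
use lightning::chain::chaininterface::BroadcasterInterface;

struct MyBroadcaster { /* e.g. a bitcoind RPC client */ }

impl BroadcasterInterface for MyBroadcaster {
	fn broadcast_transactions(&self, txs: &[&Transaction]) {
		// Submitting the slice together allows related transactions (e.g. a
		// CPFP parent and child) to reach the mempool as one batch.
		for tx in txs { self.submit(tx); } // `submit` is a hypothetical helper
	}
}
```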
pub pending_events: Mutex<Vec<events::MessageSendEvent>>,
expected_recv_msgs: Mutex<Option<Vec<wire::Message<()>>>>,
connected_peers: Mutex<HashSet<PublicKey>>,
+ pub message_fetch_counter: AtomicUsize,
}
impl TestChannelMessageHandler {
pending_events: Mutex::new(Vec::new()),
expected_recv_msgs: Mutex::new(None),
connected_peers: Mutex::new(HashSet::new()),
+ message_fetch_counter: AtomicUsize::new(0),
}
}
impl events::MessageSendEventsProvider for TestChannelMessageHandler {
fn get_and_clear_pending_msg_events(&self) -> Vec<events::MessageSendEvent> {
+ self.message_fetch_counter.fetch_add(1, Ordering::AcqRel);
let mut pending_events = self.pending_events.lock().unwrap();
let mut ret = Vec::new();
mem::swap(&mut ret, &mut *pending_events);
fn log(&self, record: &Record) {
*self.lines.lock().unwrap().entry((record.module_path.to_string(), format!("{}", record.args))).or_insert(0) += 1;
if record.level >= self.level {
- #[cfg(feature = "std")]
+ #[cfg(all(not(ldk_bench), feature = "std"))]
println!("{:<5} {} [{} : {}, {}] {}", record.level.to_string(), self.id, record.module_path, record.file, record.line, record.args);
}
}
}
impl Score for TestScorer {
+ type ScoreParams = ();
fn channel_penalty_msat(
- &self, short_channel_id: u64, _source: &NodeId, _target: &NodeId, usage: ChannelUsage
+ &self, short_channel_id: u64, _source: &NodeId, _target: &NodeId, usage: ChannelUsage, _score_params: &Self::ScoreParams
) -> u64 {
if let Some(scorer_expectations) = self.scorer_expectations.borrow_mut().as_mut() {
match scorer_expectations.pop_front() {