Merge pull request #2323 from ariard/2023-05-remove-ariard-pgp-key
author Matt Corallo <649246+TheBlueMatt@users.noreply.github.com>
Mon, 29 May 2023 22:18:42 +0000 (22:18 +0000)
committer GitHub <noreply@github.com>
Mon, 29 May 2023 22:18:42 +0000 (22:18 +0000)
Remove ariard key from the security team

39 files changed:
.github/workflows/build.yml
.gitignore
Cargo.toml
bench/Cargo.toml [new file with mode: 0644]
bench/README.md [new file with mode: 0644]
bench/benches/bench.rs [new file with mode: 0644]
fuzz/src/chanmon_consistency.rs
fuzz/src/full_stack.rs
lightning-background-processor/src/lib.rs
lightning-block-sync/src/poll.rs
lightning-custom-message/src/lib.rs
lightning-invoice/src/payment.rs
lightning-persister/Cargo.toml
lightning-persister/src/lib.rs
lightning-rapid-gossip-sync/Cargo.toml
lightning-rapid-gossip-sync/src/lib.rs
lightning/Cargo.toml
lightning/src/chain/chaininterface.rs
lightning/src/chain/chainmonitor.rs
lightning/src/chain/channelmonitor.rs
lightning/src/chain/onchaintx.rs
lightning/src/chain/package.rs
lightning/src/lib.rs
lightning/src/ln/chanmon_update_fail_tests.rs
lightning/src/ln/channelmanager.rs
lightning/src/ln/features.rs
lightning/src/ln/functional_test_utils.rs
lightning/src/ln/functional_tests.rs
lightning/src/ln/monitor_tests.rs
lightning/src/ln/msgs.rs
lightning/src/ln/outbound_payment.rs
lightning/src/ln/payment_tests.rs
lightning/src/ln/peer_handler.rs
lightning/src/ln/reorg_tests.rs
lightning/src/routing/gossip.rs
lightning/src/routing/router.rs
lightning/src/sign/mod.rs
lightning/src/sync/mod.rs
lightning/src/util/test_utils.rs

index 8e02ec9da75c803caa0dfd9b797ea6b892a13298..3b6d1a0388c693e407419af903f295a57cd715b7 100644 (file)
@@ -141,7 +141,12 @@ jobs:
           cd ..
       - name: Run benchmarks on Rust ${{ matrix.toolchain }}
         run: |
-          RUSTC_BOOTSTRAP=1 cargo bench --features _bench_unstable
+          cd bench
+          RUSTFLAGS="--cfg=ldk_bench --cfg=require_route_graph_test" cargo bench
+      - name: Run benchmarks with hashbrown on Rust ${{ matrix.toolchain }}
+        run: |
+          cd bench
+          RUSTFLAGS="--cfg=ldk_bench --cfg=require_route_graph_test" cargo bench --features hashbrown
 
   check_commits:
     runs-on: ubuntu-latest
index 7a889b5b8230af929cd28134454d2f93502531bd..7a6dc4c793032bc2c1ddf8e3e465201df6cf3543 100644 (file)
@@ -8,6 +8,8 @@ lightning-c-bindings/a.out
 Cargo.lock
 .idea
 lightning/target
-lightning/ldk-net_graph-*.bin
+lightning/net_graph-*.bin
+lightning-rapid-gossip-sync/res/full_graph.lngossip
 lightning-custom-message/target
+lightning-transaction-sync/target
 no-std-check/target
index afc0092c72da50a1a9fecbf8bfb2edf5daa04104..a3acccfdaea91614b98421590dd567d1a678ef58 100644 (file)
@@ -14,6 +14,7 @@ exclude = [
     "lightning-custom-message",
     "lightning-transaction-sync",
     "no-std-check",
+    "bench",
 ]
 
 # Our tests do actual crypto and lots of work, the tradeoff for -O2 is well
@@ -35,8 +36,3 @@ lto = "off"
 opt-level = 3
 lto = true
 panic = "abort"
-
-[profile.bench]
-opt-level = 3
-codegen-units = 1
-lto = true
diff --git a/bench/Cargo.toml b/bench/Cargo.toml
new file mode 100644 (file)
index 0000000..e582d29
--- /dev/null
@@ -0,0 +1,25 @@
+[package]
+name = "lightning-bench"
+version = "0.0.1"
+authors = ["Matt Corallo"]
+edition = "2018"
+
+[[bench]]
+name = "bench"
+harness = false
+
+[features]
+hashbrown = ["lightning/hashbrown"]
+
+[dependencies]
+lightning = { path = "../lightning", features = ["_test_utils", "criterion"] }
+lightning-persister = { path = "../lightning-persister", features = ["criterion"] }
+lightning-rapid-gossip-sync = { path = "../lightning-rapid-gossip-sync", features = ["criterion"] }
+criterion = { version = "0.4", default-features = false }
+
+[profile.release]
+opt-level = 3
+codegen-units = 1
+lto = true
+panic = "abort"
+debug = true
diff --git a/bench/README.md b/bench/README.md
new file mode 100644 (file)
index 0000000..7d4cacc
--- /dev/null
@@ -0,0 +1,6 @@
+This crate uses criterion to benchmark various LDK functions.
+
+It can be run as `RUSTFLAGS=--cfg=ldk_bench cargo bench`.
+
+For routing or other HashMap-bottlenecked functions, the `hashbrown` feature
+should also be benchmarked.
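
For context on the separate `hashbrown` benchmark run: the feature is assumed to swap the `lightning` crate's map type for hashbrown's, roughly as sketched below (an illustrative cfg-gated alias, not code from this PR; the real prelude may be organized differently):

    // Hypothetical sketch of what the `hashbrown` feature toggles for HashMap-heavy paths.
    #[cfg(feature = "hashbrown")]
    pub use hashbrown::HashMap;
    #[cfg(not(feature = "hashbrown"))]
    pub use std::collections::HashMap;
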
diff --git a/bench/benches/bench.rs b/bench/benches/bench.rs
new file mode 100644 (file)
index 0000000..54799f4
--- /dev/null
@@ -0,0 +1,22 @@
+extern crate lightning;
+extern crate lightning_persister;
+
+extern crate criterion;
+
+use criterion::{criterion_group, criterion_main};
+
+criterion_group!(benches,
+       // Note that benches run in the order given here. Thus, they're sorted according to how likely
+       // developers are to be working on the specific code listed, then by runtime.
+       lightning::routing::router::benches::generate_routes_with_zero_penalty_scorer,
+       lightning::routing::router::benches::generate_mpp_routes_with_zero_penalty_scorer,
+       lightning::routing::router::benches::generate_routes_with_probabilistic_scorer,
+       lightning::routing::router::benches::generate_mpp_routes_with_probabilistic_scorer,
+       lightning::routing::router::benches::generate_large_mpp_routes_with_probabilistic_scorer,
+       lightning::sign::benches::bench_get_secure_random_bytes,
+       lightning::ln::channelmanager::bench::bench_sends,
+       lightning_persister::bench::bench_sends,
+       lightning_rapid_gossip_sync::bench::bench_reading_full_graph_from_file,
+       lightning::routing::gossip::benches::read_network_graph,
+       lightning::routing::gossip::benches::write_network_graph);
+criterion_main!(benches);
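
The paths listed in `criterion_group!` above are plain `fn(&mut Criterion)` entry points exposed by the library crates behind the `ldk_bench` cfg. Their bodies are not part of this diff; the sketch below only illustrates the assumed shape (module layout and body are hypothetical):

    #[cfg(ldk_bench)]
    pub mod benches {
        use criterion::Criterion;

        // Illustrative only: a criterion bench exposed as a plain function so the
        // external `bench` crate can reference it by path in `criterion_group!`.
        pub fn bench_get_secure_random_bytes(bench: &mut Criterion) {
            bench.bench_function("get_secure_random_bytes", |b| b.iter(|| {
                // call the code under test here (body omitted / hypothetical)
            }));
        }
    }
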
index 837386bd94ce6a4f288102be3fca40b20f0cca75..f8c6c08a2baca464273858365e770e35145655e4 100644 (file)
@@ -18,8 +18,6 @@
 //! send-side handling is correct, other peers. We consider it a failure if any action results in a
 //! channel being force-closed.
 
-use bitcoin::TxMerkleNode;
-use bitcoin::blockdata::block::BlockHeader;
 use bitcoin::blockdata::constants::genesis_block;
 use bitcoin::blockdata::transaction::{Transaction, TxOut};
 use bitcoin::blockdata::script::{Builder, Script};
@@ -45,6 +43,7 @@ use lightning::ln::channelmanager::{ChainParameters, ChannelDetails, ChannelMana
 use lightning::ln::channel::FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
 use lightning::ln::msgs::{self, CommitmentUpdate, ChannelMessageHandler, DecodeError, UpdateAddHTLC, Init};
 use lightning::ln::script::ShutdownScript;
+use lightning::ln::functional_test_utils::*;
 use lightning::util::enforcing_trait_impls::{EnforcingSigner, EnforcementState};
 use lightning::util::errors::APIError;
 use lightning::util::logger::Logger;
@@ -101,7 +100,7 @@ impl Router for FuzzRouter {
 
 pub struct TestBroadcaster {}
 impl BroadcasterInterface for TestBroadcaster {
-       fn broadcast_transaction(&self, _tx: &Transaction) { }
+       fn broadcast_transactions(&self, _txs: &[&Transaction]) { }
 }
 
 pub struct VecWriter(pub Vec<u8>);
@@ -547,11 +546,11 @@ pub fn do_test<Out: Output>(data: &[u8], underlying_out: Out) {
        macro_rules! confirm_txn {
                ($node: expr) => { {
                        let chain_hash = genesis_block(Network::Bitcoin).block_hash();
-                       let mut header = BlockHeader { version: 0x20000000, prev_blockhash: chain_hash, merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
+                       let mut header = create_dummy_header(chain_hash, 42);
                        let txdata: Vec<_> = channel_txn.iter().enumerate().map(|(i, tx)| (i + 1, tx)).collect();
                        $node.transactions_confirmed(&header, &txdata, 1);
                        for _ in 2..100 {
-                               header = BlockHeader { version: 0x20000000, prev_blockhash: header.block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
+                               header = create_dummy_header(header.block_hash(), 42);
                        }
                        $node.best_block_updated(&header, 99);
                } }
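
Both fuzz targets now build headers via `create_dummy_header` from `lightning::ln::functional_test_utils` instead of hand-rolling a `BlockHeader`. Its body is not shown in this diff, but judging from the removed literals it is presumably close to the following sketch (field values taken from the old code; the real helper may differ):

    use bitcoin::blockdata::block::BlockHeader;
    use bitcoin::hash_types::{BlockHash, TxMerkleNode};
    use bitcoin::hashes::Hash;

    // Reconstructed from the removed `BlockHeader { .. }` literals above.
    pub fn create_dummy_header(prev_blockhash: BlockHash, time: u32) -> BlockHeader {
        BlockHeader {
            version: 0x20000000,
            prev_blockhash,
            merkle_root: TxMerkleNode::all_zeros(),
            time,
            bits: 42,
            nonce: 42,
        }
    }
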
index 137478e67c15d7dd5056466fa6aed2b1f48427bc..1192766bf3e0baadb5c61fef3c0c07dbff0f56f3 100644 (file)
@@ -13,8 +13,6 @@
 //! or payments to send/ways to handle events generated.
 //! This test has been very useful, though due to its complexity good starting inputs are critical.
 
-use bitcoin::TxMerkleNode;
-use bitcoin::blockdata::block::BlockHeader;
 use bitcoin::blockdata::constants::genesis_block;
 use bitcoin::blockdata::transaction::{Transaction, TxOut};
 use bitcoin::blockdata::script::{Builder, Script};
@@ -41,6 +39,7 @@ use lightning::ln::channelmanager::{ChainParameters, ChannelDetails, ChannelMana
 use lightning::ln::peer_handler::{MessageHandler,PeerManager,SocketDescriptor,IgnoringMessageHandler};
 use lightning::ln::msgs::{self, DecodeError};
 use lightning::ln::script::ShutdownScript;
+use lightning::ln::functional_test_utils::*;
 use lightning::routing::gossip::{P2PGossipSync, NetworkGraph};
 use lightning::routing::utxo::UtxoLookup;
 use lightning::routing::router::{InFlightHtlcs, PaymentParameters, Route, RouteParameters, Router};
@@ -145,8 +144,9 @@ struct TestBroadcaster {
        txn_broadcasted: Mutex<Vec<Transaction>>,
 }
 impl BroadcasterInterface for TestBroadcaster {
-       fn broadcast_transaction(&self, tx: &Transaction) {
-               self.txn_broadcasted.lock().unwrap().push(tx.clone());
+       fn broadcast_transactions(&self, txs: &[&Transaction]) {
+               let owned_txs: Vec<Transaction> = txs.iter().map(|tx| (*tx).clone()).collect();
+               self.txn_broadcasted.lock().unwrap().extend(owned_txs);
        }
 }
 
@@ -228,7 +228,7 @@ impl<'a> MoneyLossDetector<'a> {
                }
 
                self.blocks_connected += 1;
-               let header = BlockHeader { version: 0x20000000, prev_blockhash: self.header_hashes[self.height].0, merkle_root: TxMerkleNode::all_zeros(), time: self.blocks_connected, bits: 42, nonce: 42 };
+               let header = create_dummy_header(self.header_hashes[self.height].0, self.blocks_connected);
                self.height += 1;
                self.manager.transactions_confirmed(&header, &txdata, self.height as u32);
                self.manager.best_block_updated(&header, self.height as u32);
@@ -245,7 +245,7 @@ impl<'a> MoneyLossDetector<'a> {
 
        fn disconnect_block(&mut self) {
                if self.height > 0 && (self.max_height < 6 || self.height >= self.max_height - 6) {
-                       let header = BlockHeader { version: 0x20000000, prev_blockhash: self.header_hashes[self.height - 1].0, merkle_root: TxMerkleNode::all_zeros(), time: self.header_hashes[self.height].1, bits: 42, nonce: 42 };
+                       let header = create_dummy_header(self.header_hashes[self.height - 1].0, self.header_hashes[self.height].1);
                        self.manager.block_disconnected(&header, self.height as u32);
                        self.monitor.block_disconnected(&header, self.height as u32);
                        self.height -= 1;
@@ -752,16 +752,16 @@ mod tests {
                // 00 030000000000000000000000000000000000000000000000000000000000000002 03000000000000000000000000000000 - noise act two (0||pubkey||mac)
                //
                // 030012 - inbound read from peer id 0 of len 18
-               // 000a 03000000000000000000000000000000 - message header indicating message length 10
-               // 03001a - inbound read from peer id 0 of len 26
-               // 0010 00022000 00022000 03000000000000000000000000000000 - init message (type 16) with static_remotekey (0x2000) and mac
+               // 0010 03000000000000000000000000000000 - message header indicating message length 16
+               // 030020 - inbound read from peer id 0 of len 32
+               // 0010 00021aaa 0008aaaaaaaaaaaa9aaa 03000000000000000000000000000000 - init message (type 16) with static_remotekey required and other bits optional and mac
                //
                // 030012 - inbound read from peer id 0 of len 18
-               // 0141 03000000000000000000000000000000 - message header indicating message length 321
+               // 0147 03000000000000000000000000000000 - message header indicating message length 327
                // 0300fe - inbound read from peer id 0 of len 254
                // 0020 7500000000000000000000000000000000000000000000000000000000000000 ff4f00f805273c1b203bb5ebf8436bfde57b3be8c2f5e95d9491dbb181909679 000000000000c350 0000000000000000 0000000000000162 ffffffffffffffff 0000000000000222 0000000000000000 000000fd 0006 01e3 030000000000000000000000000000000000000000000000000000000000000001 030000000000000000000000000000000000000000000000000000000000000002 030000000000000000000000000000000000000000000000000000000000000003 030000000000000000000000000000000000000000000000000000000000000004 - beginning of open_channel message
-               // 030053 - inbound read from peer id 0 of len 83
-               // 030000000000000000000000000000000000000000000000000000000000000005 020900000000000000000000000000000000000000000000000000000000000000 01 03000000000000000000000000000000 - rest of open_channel and mac
+               // 030059 - inbound read from peer id 0 of len 89
+               // 030000000000000000000000000000000000000000000000000000000000000005 020900000000000000000000000000000000000000000000000000000000000000 01 0000 01021000 03000000000000000000000000000000 - rest of open_channel and mac
                //
                // 00fd00fd - Two feerate requests (all returning min feerate, which our open_channel also uses) (gonna be ingested by FuzzEstimator)
                // - client should now respond with accept_channel (CHECK 1: type 33 to peer 03000000)
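
A quick hand check of the updated init bytes quoted above (reference only, not part of the PR): BOLT #1 frames the payload as type (2 bytes) || gflen (2) || globalfeatures || flen (2) || features, which for the new vector adds up to the advertised 16 bytes:

    // "0010 00021aaa 0008aaaaaaaaaaaa9aaa" from the comment above, split out:
    const INIT_MSG: [u8; 16] = [
        0x00, 0x10,             // type 16: init
        0x00, 0x02, 0x1a, 0xaa, // global features: length 2, bits 0x1aaa
        0x00, 0x08,             // features: length 8 ...
        0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0x9a, 0xaa, // ... static_remotekey required, others optional
    ];
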
@@ -800,19 +800,19 @@ mod tests {
                // 000302000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003000000000000000000000000000000 - inbound noise act 3
                //
                // 030112 - inbound read from peer id 1 of len 18
-               // 000a 01000000000000000000000000000000 - message header indicating message length 10
-               // 03011a - inbound read from peer id 1 of len 26
-               // 0010 00022000 00022000 01000000000000000000000000000000 - init message (type 16) with static_remotekey (0x2000) and mac
+               // 0010 01000000000000000000000000000000 - message header indicating message length 16
+               // 030120 - inbound read from peer id 1 of len 32
+               // 0010 00021aaa 0008aaaaaaaaaaaa9aaa 01000000000000000000000000000000 - init message (type 16) with static_remotekey required and other bits optional and mac
                //
                // 05 01 030200000000000000000000000000000000000000000000000000000000000000 00c350 0003e8 - create outbound channel to peer 1 for 50k sat
                // 00fd - One feerate requests (all returning min feerate) (gonna be ingested by FuzzEstimator)
                //
                // 030112 - inbound read from peer id 1 of len 18
-               // 0110 01000000000000000000000000000000 - message header indicating message length 272
+               // 0112 01000000000000000000000000000000 - message header indicating message length 274
                // 0301ff - inbound read from peer id 1 of len 255
                // 0021 0000000000000000000000000000000000000000000000000000000000000e05 0000000000000162 00000000004c4b40 00000000000003e8 00000000000003e8 00000002 03f0 0005 030000000000000000000000000000000000000000000000000000000000000100 030000000000000000000000000000000000000000000000000000000000000200 030000000000000000000000000000000000000000000000000000000000000300 030000000000000000000000000000000000000000000000000000000000000400 030000000000000000000000000000000000000000000000000000000000000500 02660000000000000000000000000000 - beginning of accept_channel
-               // 030121 - inbound read from peer id 1 of len 33
-               // 0000000000000000000000000000000000 01000000000000000000000000000000 - rest of accept_channel and mac
+               // 030123 - inbound read from peer id 1 of len 35
+               // 0000000000000000000000000000000000 0000 01000000000000000000000000000000 - rest of accept_channel and mac
                //
                // 0a - create the funding transaction (client should send funding_created now)
                //
@@ -1006,7 +1006,7 @@ mod tests {
                // - client now fails the HTLC backwards as it was unable to extract the payment preimage (CHECK 9 duplicate and CHECK 10)
 
                let logger = Arc::new(TrackingLogger { lines: Mutex::new(HashMap::new()) });
-               super::do_test(&::hex::decode("01000000000000000000000000000000000000000000000000000000000000000000000001000300000000000000000000000000000000000000000000000000000000000000020300320003000000000000000000000000000000000000000000000000000000000000000203000000000000000000000000000000030012000a0300000000000000000000000000000003001a00100002200000022000030000000000000000000000000000000300120141030000000000000000000000000000000300fe00207500000000000000000000000000000000000000000000000000000000000000ff4f00f805273c1b203bb5ebf8436bfde57b3be8c2f5e95d9491dbb181909679000000000000c35000000000000000000000000000000162ffffffffffffffff00000000000002220000000000000000000000fd000601e3030000000000000000000000000000000000000000000000000000000000000001030000000000000000000000000000000000000000000000000000000000000002030000000000000000000000000000000000000000000000000000000000000003030000000000000000000000000000000000000000000000000000000000000004030053030000000000000000000000000000000000000000000000000000000000000005020900000000000000000000000000000000000000000000000000000000000000010300000000000000000000000000000000fd00fd0300120084030000000000000000000000000000000300940022ff4f00f805273c1b203bb5ebf8436bfde57b3be8c2f5e95d9491dbb1819096793d00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000210100000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000c005e020000000100000000000000000000000000000000000000000000000000000000000000000000000000ffffffff0150c3000000000000220020ae00000000000000000000000000000000000000000000000000000000000000000000000c00000c00000c00000c00000c00000c00000c00000c00000c00000c00000c00000c000003001200430300000000000000000000000000000003005300243d0000000000000000000000000000000000000000000000000000000000000002080000000000000000000000000000000000000000000000000000000000000003000000000000000000000000000000010301320003000000000000000000000000000000000000000000000000000000000000000703000000000000000000000000000000030142000302000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003000000000000000000000000000000030112000a0100000000000000000000000000000003011a0010000220000002200001000000000000000000000000000000050103020000000000000000000000000000000000000000000000000000000000000000c3500003e800fd0301120110010000000000000000000000000000000301ff00210000000000000000000000000000000000000000000000000000000000000e05000000000000016200000000004c4b4000000000000003e800000000000003e80000000203f00005030000000000000000000000000000000000000000000000000000000000000100030000000000000000000000000000000000000000000000000000000000000200030000000000000000000000000000000000000000000000000000000000000300030000000000000000000000000000000000000000000000000000000000000400030000000000000000000000000000000000000000000000000000000000000500026600000000000000000000000000000301210000000000000000000000000000000000010000000000000000000000000000000a03011200620100000000000000000000000000000003017200233a00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000007c0001000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000b03011200430100000000000000000000000000000003015300243a000000000000000000000000000000000000000000000000000000000000000267000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000003001205ac0300000000000000000000000000000003
00ff00803d0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003e80ff00000000000000000000000000000000000000000000000000000000000000000003f00003000000000000000000000000000000000000000000000000000000000000055511020203e80401a0060800000e00000100000a00000000000000000000000000000000000000000000000000000000000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300c1ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffab000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003001200640300000000000000000000000000000003007400843d000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000030010000000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003001200630300000000000000000000000000000003007300853d000000000000000000000000000000000000000000000000000000000000000900000000000000000000000000000000000000000000000000000000000000020b0000000000000000000000000000000000000000000000000000000000000003000000000000000000000000000000070301120064010000000000000
0000000000000000003017400843a00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000006a000100000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000003011200630100000000000000000000000000000003017300853a00000000000000000000000000000000000000000000000000000000000000660000000000000000000000000000000000000000000000000000000000000002640000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000030112004a0100000000000000000000000000000003015a00823a000000000000000000000000000000000000000000000000000000000000000000000000000000ff008888888888888888888888888888888888888888888888888888888888880100000000000000000000000000000003011200640100000000000000000000000000000003017400843a000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000100000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000003011200630100000000000000000000000000000003017300853a0000000000000000000000000000000000000000000000000000000000000067000000000000000000000000000000000000000000000000000000000000000265000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000003001205ac030000000000000000000000000000000300ff00803d0000000000000000000000000000000000000000000000000000000000000000000000000000010000000000003e80ff00000000000000000000000000000000000000000000000000000000000000000003f00003000000000000000000000000000000000000000000000000000000000000055511020203e80401a0060800000e00000100000a00000000000000000000000000000000000000000000000000000000000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300c1ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffab000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003001200630300000000000000000000000000000003007300853d000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000020a000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003001200640300000000000000000000000000000003007400843d0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c3010000000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003001200630300000000000000000000000000000003007300853d000000000000000000000000000000000000000000000000000000000000000b00000000000000000000000000000000000000000000000000000000000000020d00000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000703011200640100000000000000000000000000000003017400843a000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000039000100000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000003011200630100000000000000000000000000000003017300853a00000000000000000000000000000000000000000000000000000000000000640000000000000000000000000000000000000000000000000000000000000002700000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000030112002c0100000000000000000000000000000003013c00833a00000000000000000000000000000000000000000000000000000000000000000000000000000100000100000000000000000000000000000003011200640100000000000000000000000000000003017400843a000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000039000100000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000003011200630100000000000000000000000000000003017300853a000000000000000000000000000000000000000000000000000000000000006500000000000000000000000000000000000000000000000000000000000000027100000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000703001200630300000000000000000000000000000003007300853d000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000020c000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003001200640300000000000000000000000000000003007400843d000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000032010000000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003001205ac030000000000000000000000000000000300ff00803d00000000000000000000000000000000000000000
000000000000000000000000000000000000200000000000b0838ff00000000000000000000000000000000000000000000000000000000000000000003f0000300000000000000000000000000000000000000000000000000000000000005551202030927c00401a0060800000e00000100000a00000000000000000000000000000000000000000000000000000000000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300c1ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff53000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003001200a4030000000000000000000000000000000300b400843d00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000007501000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000006705000000000000000000000000000000000000000000000000000000000000060300000000000000000000000000000003001200630300000000000000000000000000000003007300853d000000000000000000000000000000000000000000000000000000000000000d00000000000000000000000000000000000000000000000000000000000000020f00000000000000000000000000000000000000000000
00000000000000000003000000000000000000000000000000070c007d02000000013a000000000000000000000000000000000000000000000000000000000000000000000000000000800258020000000000002200204b0000000000000000000000000000000000000000000000000000000000000014c00000000000001600142800000000000000000000000000000000000000050000200c005e0200000001730000000000000000000000000000000000000000000000000000000000000000000000000000000001a701000000000000220020b200000000000000000000000000000000000000000000000000000000000000000000000c00000c00000c00000c00000c000007").unwrap(), &(Arc::clone(&logger) as Arc<dyn Logger>));
+               super::do_test(&::hex::decode("01000000000000000000000000000000000000000000000000000000000000000000000001000300000000000000000000000000000000000000000000000000000000000000020300320003000000000000000000000000000000000000000000000000000000000000000203000000000000000000000000000000030012001003000000000000000000000000000000030020001000021aaa0008aaaaaaaaaaaa9aaa030000000000000000000000000000000300120147030000000000000000000000000000000300fe00207500000000000000000000000000000000000000000000000000000000000000ff4f00f805273c1b203bb5ebf8436bfde57b3be8c2f5e95d9491dbb181909679000000000000c35000000000000000000000000000000162ffffffffffffffff00000000000002220000000000000000000000fd000601e3030000000000000000000000000000000000000000000000000000000000000001030000000000000000000000000000000000000000000000000000000000000002030000000000000000000000000000000000000000000000000000000000000003030000000000000000000000000000000000000000000000000000000000000004030059030000000000000000000000000000000000000000000000000000000000000005020900000000000000000000000000000000000000000000000000000000000000010000010210000300000000000000000000000000000000fd00fd0300120084030000000000000000000000000000000300940022ff4f00f805273c1b203bb5ebf8436bfde57b3be8c2f5e95d9491dbb1819096793d00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000210100000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000c005e020000000100000000000000000000000000000000000000000000000000000000000000000000000000ffffffff0150c3000000000000220020ae00000000000000000000000000000000000000000000000000000000000000000000000c00000c00000c00000c00000c00000c00000c00000c00000c00000c00000c00000c000003001200430300000000000000000000000000000003005300243d0000000000000000000000000000000000000000000000000000000000000002080000000000000000000000000000000000000000000000000000000000000003000000000000000000000000000000010301320003000000000000000000000000000000000000000000000000000000000000000703000000000000000000000000000000030142000302000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003000000000000000000000000000000030112001001000000000000000000000000000000030120001000021aaa0008aaaaaaaaaaaa9aaa01000000000000000000000000000000050103020000000000000000000000000000000000000000000000000000000000000000c3500003e800fd0301120112010000000000000000000000000000000301ff00210000000000000000000000000000000000000000000000000000000000000e05000000000000016200000000004c4b4000000000000003e800000000000003e80000000203f000050300000000000000000000000000000000000000000000000000000000000001000300000000000000000000000000000000000000000000000000000000000002000300000000000000000000000000000000000000000000000000000000000003000300000000000000000000000000000000000000000000000000000000000004000300000000000000000000000000000000000000000000000000000000000005000266000000000000000000000000000003012300000000000000000000000000000000000000010000000000000000000000000000000a03011200620100000000000000000000000000000003017200233a00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000007c0001000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000b03011200430100000000000000000000000000000003015300243a00000000000000000000000000000000000000000000000000000000000000026700000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000300
1205ac030000000000000000000000000000000300ff00803d0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003e80ff00000000000000000000000000000000000000000000000000000000000000000003f00003000000000000000000000000000000000000000000000000000000000000055511020203e80401a0060800000e00000100000a00000000000000000000000000000000000000000000000000000000000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300c1ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffab000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003001200640300000000000000000000000000000003007400843d000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000030010000000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003001200630300000000000000000000000000000003007300853d000000000000000000000000000000000000000000000000000000000000000900000000000000000000000000000000000000000000000000000000000000020b000000000000000000000000000000000000000000000000000000000000000300000000000000000
00000000000000703011200640100000000000000000000000000000003017400843a00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000006a000100000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000003011200630100000000000000000000000000000003017300853a00000000000000000000000000000000000000000000000000000000000000660000000000000000000000000000000000000000000000000000000000000002640000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000030112004a0100000000000000000000000000000003015a00823a000000000000000000000000000000000000000000000000000000000000000000000000000000ff008888888888888888888888888888888888888888888888888888888888880100000000000000000000000000000003011200640100000000000000000000000000000003017400843a000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000100000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000003011200630100000000000000000000000000000003017300853a0000000000000000000000000000000000000000000000000000000000000067000000000000000000000000000000000000000000000000000000000000000265000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000003001205ac030000000000000000000000000000000300ff00803d0000000000000000000000000000000000000000000000000000000000000000000000000000010000000000003e80ff00000000000000000000000000000000000000000000000000000000000000000003f00003000000000000000000000000000000000000000000000000000000000000055511020203e80401a0060800000e00000100000a00000000000000000000000000000000000000000000000000000000000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300c1ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffab000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003001200630300000000000000000000000000000003007300853d000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000020a000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003001200640300000000000000000000000000000003007400843d0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c3010000000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003001200630300000000000000000000000000000003007300853d000000000000000000000000000000000000000000000000000000000000000b00000000000000000000000000000000000000000000000000000000000000020d00000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000703011200640100000000000000000000000000000003017400843a000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000039000100000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000003011200630100000000000000000000000000000003017300853a00000000000000000000000000000000000000000000000000000000000000640000000000000000000000000000000000000000000000000000000000000002700000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000030112002c0100000000000000000000000000000003013c00833a00000000000000000000000000000000000000000000000000000000000000000000000000000100000100000000000000000000000000000003011200640100000000000000000000000000000003017400843a000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000039000100000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000003011200630100000000000000000000000000000003017300853a000000000000000000000000000000000000000000000000000000000000006500000000000000000000000000000000000000000000000000000000000000027100000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000703001200630300000000000000000000000000000003007300853d000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000020c000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003001200640300000000000000000000000000000003007400843d000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000032010000000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003001205ac030000000000000000000000000000000300ff00803d0
0000000000000000000000000000000000000000000000000000000000000000000000000000200000000000b0838ff00000000000000000000000000000000000000000000000000000000000000000003f0000300000000000000000000000000000000000000000000000000000000000005551202030927c00401a0060800000e00000100000a00000000000000000000000000000000000000000000000000000000000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300c1ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff53000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003001200a4030000000000000000000000000000000300b400843d00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000007501000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000006705000000000000000000000000000000000000000000000000000000000000060300000000000000000000000000000003001200630300000000000000000000000000000003007300853d000000000000000000000000000000000000000000000000000000000000000d00000000000000000000000000000000000000000000000000000000000000020f0000
000000000000000000000000000000000000000000000000000000000003000000000000000000000000000000070c007d02000000013a000000000000000000000000000000000000000000000000000000000000000000000000000000800258020000000000002200204b0000000000000000000000000000000000000000000000000000000000000014c00000000000001600142800000000000000000000000000000000000000050000200c005e0200000001730000000000000000000000000000000000000000000000000000000000000000000000000000000001a701000000000000220020b200000000000000000000000000000000000000000000000000000000000000000000000c00000c00000c00000c00000c000007").unwrap(), &(Arc::clone(&logger) as Arc<dyn Logger>));
 
                let log_entries = logger.lines.lock().unwrap();
                assert_eq!(log_entries.get(&("lightning::ln::peer_handler".to_string(), "Handling SendAcceptChannel event in peer_handler for node 030000000000000000000000000000000000000000000000000000000000000002 for channel ff4f00f805273c1b203bb5ebf8436bfde57b3be8c2f5e95d9491dbb181909679".to_string())), Some(&1)); // 1
index 539a6bf14880397e580c8431fd2ceaf1d1cf43ab..4d270286d5027c780d36a16a58711d11e72dcf47 100644 (file)
@@ -108,7 +108,7 @@ const PING_TIMER: u64 = 1;
 const NETWORK_PRUNE_TIMER: u64 = 60 * 60;
 
 #[cfg(not(test))]
-const SCORER_PERSIST_TIMER: u64 = 30;
+const SCORER_PERSIST_TIMER: u64 = 60 * 60;
 #[cfg(test)]
 const SCORER_PERSIST_TIMER: u64 = 1;
 
@@ -236,9 +236,11 @@ fn handle_network_graph_update<L: Deref>(
        }
 }
 
+/// Updates scorer based on event and returns whether an update occurred so we can decide whether
+/// to persist.
 fn update_scorer<'a, S: 'static + Deref<Target = SC> + Send + Sync, SC: 'a + WriteableScore<'a>>(
        scorer: &'a S, event: &Event
-) {
+) -> bool {
        let mut score = scorer.lock();
        match event {
                Event::PaymentPathFailed { ref path, short_channel_id: Some(scid), .. } => {
@@ -258,8 +260,9 @@ fn update_scorer<'a, S: 'static + Deref<Target = SC> + Send + Sync, SC: 'a + Wri
                Event::ProbeFailed { path, short_channel_id: Some(scid), .. } => {
                        score.probe_failed(path, *scid);
                },
-               _ => {},
+               _ => return false,
        }
+       true
 }
 
 macro_rules! define_run_body {
@@ -352,9 +355,15 @@ macro_rules! define_run_body {
                        // Note that we want to run a graph prune once not long after startup before
                        // falling back to our usual hourly prunes. This avoids short-lived clients never
                        // pruning their network graph. We run once 60 seconds after startup before
-                       // continuing our normal cadence.
+                       // continuing our normal cadence. For RGS, since 60 seconds is likely too long,
+                       // we prune after an initial sync completes.
                        let prune_timer = if have_pruned { NETWORK_PRUNE_TIMER } else { FIRST_NETWORK_PRUNE_TIMER };
-                       if $timer_elapsed(&mut last_prune_call, prune_timer) {
+                       let prune_timer_elapsed = $timer_elapsed(&mut last_prune_call, prune_timer);
+                       let should_prune = match $gossip_sync {
+                               GossipSync::Rapid(_) => !have_pruned || prune_timer_elapsed,
+                               _ => prune_timer_elapsed,
+                       };
+                       if should_prune {
                                // The network graph must not be pruned while rapid sync completion is pending
                                if let Some(network_graph) = $gossip_sync.prunable_network_graph() {
                                        #[cfg(feature = "std")] {
@@ -616,12 +625,19 @@ where
                let network_graph = gossip_sync.network_graph();
                let event_handler = &event_handler;
                let scorer = &scorer;
+               let logger = &logger;
+               let persister = &persister;
                async move {
                        if let Some(network_graph) = network_graph {
                                handle_network_graph_update(network_graph, &event)
                        }
                        if let Some(ref scorer) = scorer {
-                               update_scorer(scorer, &event);
+                               if update_scorer(scorer, &event) {
+                                       log_trace!(logger, "Persisting scorer after update");
+                                       if let Err(e) = persister.persist_scorer(&scorer) {
+                                               log_error!(logger, "Error: Failed to persist scorer, check your disk and permissions {}", e)
+                                       }
+                               }
                        }
                        event_handler(event).await;
                }
@@ -751,7 +767,12 @@ impl BackgroundProcessor {
                                        handle_network_graph_update(network_graph, &event)
                                }
                                if let Some(ref scorer) = scorer {
-                                       update_scorer(scorer, &event);
+                                       if update_scorer(scorer, &event) {
+                                               log_trace!(logger, "Persisting scorer after update");
+                                               if let Err(e) = persister.persist_scorer(&scorer) {
+                                                       log_error!(logger, "Error: Failed to persist scorer, check your disk and permissions {}", e)
+                                               }
+                                       }
                                }
                                event_handler.handle_event(event);
                        };
@@ -817,7 +838,6 @@ impl Drop for BackgroundProcessor {
 
 #[cfg(all(feature = "std", test))]
 mod tests {
-       use bitcoin::blockdata::block::BlockHeader;
        use bitcoin::blockdata::constants::genesis_block;
        use bitcoin::blockdata::locktime::PackedLockTime;
        use bitcoin::blockdata::transaction::{Transaction, TxOut};
@@ -833,6 +853,7 @@ mod tests {
        use lightning::ln::channelmanager;
        use lightning::ln::channelmanager::{BREAKDOWN_TIMEOUT, ChainParameters, MIN_CLTV_EXPIRY_DELTA, PaymentId};
        use lightning::ln::features::{ChannelFeatures, NodeFeatures};
+       use lightning::ln::functional_test_utils::*;
        use lightning::ln::msgs::{ChannelMessageHandler, Init};
        use lightning::ln::peer_handler::{PeerManager, MessageHandler, SocketDescriptor, IgnoringMessageHandler};
        use lightning::routing::gossip::{NetworkGraph, NodeId, P2PGossipSync};
@@ -849,8 +870,6 @@ mod tests {
        use std::sync::{Arc, Mutex};
        use std::sync::mpsc::SyncSender;
        use std::time::Duration;
-       use bitcoin::hashes::Hash;
-       use bitcoin::TxMerkleNode;
        use lightning_rapid_gossip_sync::RapidGossipSync;
        use super::{BackgroundProcessor, GossipSync, FRESHNESS_TIMER};
 
@@ -1190,7 +1209,7 @@ mod tests {
                for i in 1..=depth {
                        let prev_blockhash = node.best_block.block_hash();
                        let height = node.best_block.height() + 1;
-                       let header = BlockHeader { version: 0x20000000, prev_blockhash, merkle_root: TxMerkleNode::all_zeros(), time: height, bits: 42, nonce: 42 };
+                       let header = create_dummy_header(prev_blockhash, height);
                        let txdata = vec![(0, tx)];
                        node.best_block = BestBlock::new(header.block_hash(), height);
                        match i {
@@ -1710,6 +1729,10 @@ mod tests {
                if !std::thread::panicking() {
                        bg_processor.stop().unwrap();
                }
+
+               let log_entries = nodes[0].logger.lines.lock().unwrap();
+               let expected_log = "Persisting scorer after update".to_string();
+               assert_eq!(*log_entries.get(&("lightning_background_processor".to_string(), expected_log)).unwrap(), 5);
        }
 
        #[tokio::test]
@@ -1752,6 +1775,10 @@ mod tests {
                let t2 = tokio::spawn(async move {
                        do_test_payment_path_scoring!(nodes, receiver.recv().await);
                        exit_sender.send(()).unwrap();
+
+                       let log_entries = nodes[0].logger.lines.lock().unwrap();
+                       let expected_log = "Persisting scorer after update".to_string();
+                       assert_eq!(*log_entries.get(&("lightning_background_processor".to_string(), expected_log)).unwrap(), 5);
                });
 
                let (r1, r2) = tokio::join!(t1, t2);
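
Taken together, the scorer changes above implement a gate-then-persist pattern: the event handler writes the scorer out only when update_scorer reports that the event actually changed it. A rough, self-contained sketch of that pattern follows; every `Toy*` name is a hypothetical stand-in rather than an LDK type:

    use std::sync::Mutex;

    struct ToyScorer { failed_paths: u64 }
    struct ToyStore { writes: Mutex<u64> }

    enum ToyEvent { PaymentPathFailed, Unrelated }

    // Mirrors the new bool return: true only if the event changed the scorer.
    fn update_toy_scorer(scorer: &Mutex<ToyScorer>, event: &ToyEvent) -> bool {
        match event {
            ToyEvent::PaymentPathFailed => { scorer.lock().unwrap().failed_paths += 1; true },
            ToyEvent::Unrelated => false,
        }
    }

    fn handle_event(scorer: &Mutex<ToyScorer>, store: &ToyStore, event: ToyEvent) {
        // Only hit the disk when something actually changed.
        if update_toy_scorer(scorer, &event) {
            *store.writes.lock().unwrap() += 1;
        }
    }
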
index 9f7e8becf5060c2f62471825a2aa0cfeca573f5e..e7171cf3656138036d50385691c96e419db29bcd 100644 (file)
@@ -136,8 +136,11 @@ impl ValidatedBlockHeader {
 
                if let Network::Bitcoin = network {
                        if self.height % 2016 == 0 {
-                               let previous_work = previous_header.header.work();
-                               if work > (previous_work << 2) || work < (previous_work >> 2) {
+                               let target = self.header.target();
+                               let previous_target = previous_header.header.target();
+                               let min_target = previous_target >> 2;
+                               let max_target = previous_target << 2;
+                               if target > max_target || target < min_target {
                                        return Err(BlockSourceError::persistent("invalid difficulty transition"))
                                }
                        } else if self.header.bits != previous_header.header.bits {
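
The revised check compares targets directly: at each 2016-block boundary the new target must lie within a factor of four of the previous one, i.e. difficulty may at most quadruple or fall to a quarter. A minimal illustration of that bound, using `u128` in place of the 256-bit target type:

    // Illustrative only: real headers carry a 256-bit target, a u128 stands in for it here.
    fn difficulty_transition_ok(previous_target: u128, new_target: u128) -> bool {
        let min_target = previous_target >> 2; // at most 4x harder (target shrinks to a quarter)
        let max_target = previous_target << 2; // at most 4x easier (target grows fourfold)
        new_target >= min_target && new_target <= max_target
    }

    fn main() {
        assert!(difficulty_transition_ok(1_000_000, 250_000));    // exactly 4x harder: accepted
        assert!(!difficulty_transition_ok(1_000_000, 249_999));   // more than 4x harder: rejected
        assert!(!difficulty_transition_ok(1_000_000, 4_000_001)); // more than 4x easier: rejected
    }
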
index a6e43978d47ded2e1c9efd58c50fe234d7a02564..a0a70c9de03ff26dfd499bb7b8cb1dc6eeee8dbe 100644 (file)
@@ -20,6 +20,7 @@
 //! # use bitcoin::secp256k1::PublicKey;
 //! # use lightning::io;
 //! # use lightning::ln::msgs::{DecodeError, LightningError};
+//! # use lightning::ln::features::{InitFeatures, NodeFeatures};
 //! use lightning::ln::peer_handler::CustomMessageHandler;
 //! use lightning::ln::wire::{CustomMessageReader, self};
 //! use lightning::util::ser::Writeable;
 //! #     fn get_and_clear_pending_msg(&self) -> Vec<(PublicKey, Self::CustomMessage)> {
 //! #         unimplemented!()
 //! #     }
+//! #     fn provided_node_features(&self) -> NodeFeatures {
+//! #         unimplemented!()
+//! #     }
+//! #     fn provided_init_features(&self, _their_node_id: &PublicKey) -> InitFeatures {
+//! #         unimplemented!()
+//! #     }
 //! }
 //!
 //! #[derive(Debug)]
 //! #     fn get_and_clear_pending_msg(&self) -> Vec<(PublicKey, Self::CustomMessage)> {
 //! #         unimplemented!()
 //! #     }
+//! #     fn provided_node_features(&self) -> NodeFeatures {
+//! #         unimplemented!()
+//! #     }
+//! #     fn provided_init_features(&self, _their_node_id: &PublicKey) -> InitFeatures {
+//! #         unimplemented!()
+//! #     }
 //! }
 //!
 //! #[derive(Debug)]
 //! #     fn get_and_clear_pending_msg(&self) -> Vec<(PublicKey, Self::CustomMessage)> {
 //! #         unimplemented!()
 //! #     }
+//! #     fn provided_node_features(&self) -> NodeFeatures {
+//! #         unimplemented!()
+//! #     }
+//! #     fn provided_init_features(&self, _their_node_id: &PublicKey) -> InitFeatures {
+//! #         unimplemented!()
+//! #     }
 //! }
 //!
 //! # fn main() {
@@ -268,6 +287,22 @@ macro_rules! composite_custom_message_handler {
                                        )*
                                        .collect()
                        }
+
+                       fn provided_node_features(&self) -> $crate::lightning::ln::features::NodeFeatures {
+                               $crate::lightning::ln::features::NodeFeatures::empty()
+                                       $(
+                                               | self.$field.provided_node_features()
+                                       )*
+                       }
+
+                       fn provided_init_features(
+                               &self, their_node_id: &$crate::bitcoin::secp256k1::PublicKey
+                       ) -> $crate::lightning::ln::features::InitFeatures {
+                               $crate::lightning::ln::features::InitFeatures::empty()
+                                       $(
+                                               | self.$field.provided_init_features(their_node_id)
+                                       )*
+                       }
                }
 
                impl $crate::lightning::ln::wire::CustomMessageReader for $handler {
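
The two new macro arms simply OR together the features advertised by each sub-handler, starting from an empty set. A rough sketch of that fold as a standalone function (illustrative only, not something the macro itself generates):

    use lightning::ln::features::NodeFeatures;

    // Union of two sub-handlers' advertised features, mirroring the macro's
    // `NodeFeatures::empty() | self.field.provided_node_features()` chain.
    fn union_node_features(a: NodeFeatures, b: NodeFeatures) -> NodeFeatures {
        NodeFeatures::empty() | a | b
    }
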
index c08a00a0ca23ec18f31fcfbbe1afca9aff6ba57e..7abfbe4bfc4e7201d03bcdabafcd023d28f7cb3a 100644 (file)
@@ -169,7 +169,7 @@ fn expiry_time_from_unix_epoch(invoice: &Invoice) -> Duration {
 }
 
 /// An error that may occur when making a payment.
-#[derive(Clone, Debug)]
+#[derive(Clone, Debug, PartialEq, Eq)]
 pub enum PaymentError {
        /// An error resulting from the provided [`Invoice`] or payment hash.
        Invoice(&'static str),
index 88132bdcfd660ed70e367ed63c420a08244e408c..22d4b16c42dde8a4828fb424ad64ab5da52ad326 100644 (file)
@@ -13,9 +13,6 @@ edition = "2018"
 all-features = true
 rustdoc-args = ["--cfg", "docsrs"]
 
-[features]
-_bench_unstable = ["lightning/_bench_unstable"]
-
 [dependencies]
 bitcoin = "0.29.0"
 lightning = { version = "0.0.115", path = "../lightning" }
@@ -24,5 +21,8 @@ libc = "0.2"
 [target.'cfg(windows)'.dependencies]
 winapi = { version = "0.3", features = ["winbase"] }
 
+[target.'cfg(ldk_bench)'.dependencies]
+criterion = { version = "0.4", optional = true, default-features = false }
+
 [dev-dependencies]
 lightning = { version = "0.0.115", path = "../lightning", features = ["_test_utils"] }
index d25ab6f9fca9db824c6f0b7a6d1a3d1086989d72..670a7369d8b92c7bb415125f3dc49186c4b2f3d3 100644 (file)
@@ -8,8 +8,7 @@
 
 #![cfg_attr(docsrs, feature(doc_auto_cfg))]
 
-#![cfg_attr(all(test, feature = "_bench_unstable"), feature(test))]
-#[cfg(all(test, feature = "_bench_unstable"))] extern crate test;
+#[cfg(ldk_bench)] extern crate criterion;
 
 mod util;
 
@@ -91,13 +90,13 @@ impl FilesystemPersister {
                                continue;
                        }
 
-                       let txid = Txid::from_hex(filename.split_at(64).0)
+                       let txid: Txid = Txid::from_hex(filename.split_at(64).0)
                                .map_err(|_| std::io::Error::new(
                                        std::io::ErrorKind::InvalidData,
                                        "Invalid tx ID in filename",
                                ))?;
 
-                       let index = filename.split_at(65).1.parse()
+                       let index: u16 = filename.split_at(65).1.parse()
                                .map_err(|_| std::io::Error::new(
                                        std::io::ErrorKind::InvalidData,
                                        "Invalid tx index in filename",
@@ -136,9 +135,8 @@ mod tests {
        extern crate lightning;
        extern crate bitcoin;
        use crate::FilesystemPersister;
-       use bitcoin::blockdata::block::{Block, BlockHeader};
        use bitcoin::hashes::hex::FromHex;
-       use bitcoin::{Txid, TxMerkleNode};
+       use bitcoin::Txid;
        use lightning::chain::ChannelMonitorUpdateStatus;
        use lightning::chain::chainmonitor::Persist;
        use lightning::chain::channelmonitor::CLOSED_CHANNEL_UPDATE_ID;
@@ -148,7 +146,6 @@ mod tests {
        use lightning::ln::functional_test_utils::*;
        use lightning::util::test_utils;
        use std::fs;
-       use bitcoin::hashes::Hash;
        #[cfg(target_os = "windows")]
        use {
                lightning::get_event_msg,
@@ -247,8 +244,7 @@ mod tests {
                let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
                assert_eq!(node_txn.len(), 1);
 
-               let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[0].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
-               connect_block(&nodes[1], &Block { header, txdata: vec![node_txn[0].clone(), node_txn[0].clone()]});
+               connect_block(&nodes[1], &create_dummy_block(nodes[0].best_block_hash(), 42, vec![node_txn[0].clone(), node_txn[0].clone()]));
                check_closed_broadcast!(nodes[1], true);
                check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
                check_added_monitors!(nodes[1], 1);
@@ -338,14 +334,16 @@ mod tests {
        }
 }
 
-#[cfg(all(test, feature = "_bench_unstable"))]
+#[cfg(ldk_bench)]
+/// Benches
 pub mod bench {
-       use test::Bencher;
+       use criterion::Criterion;
 
-       #[bench]
-       fn bench_sends(bench: &mut Bencher) {
+       /// Bench!
+       pub fn bench_sends(bench: &mut Criterion) {
                let persister_a = super::FilesystemPersister::new("bench_filesystem_persister_a".to_string());
                let persister_b = super::FilesystemPersister::new("bench_filesystem_persister_b".to_string());
-               lightning::ln::channelmanager::bench::bench_two_sends(bench, persister_a, persister_b);
+               lightning::ln::channelmanager::bench::bench_two_sends(
+                       bench, "bench_filesystem_persisted_sends", persister_a, persister_b);
        }
 }
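
With the nightly `test` harness removed, this bench function takes `&mut Criterion` and relies on an external harness to invoke it. A minimal harness might look like the following, assuming the crate is built with the `ldk_bench` cfg so the `bench` module is compiled in:

    use criterion::{criterion_group, criterion_main, Criterion};

    fn persister_sends(c: &mut Criterion) {
        // Delegates to the bench function defined above (only present under cfg(ldk_bench)).
        lightning_persister::bench::bench_sends(c);
    }

    criterion_group!(benches, persister_sends);
    criterion_main!(benches);
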
index 6673431d93b210d24c87ac42e9c9561b4728d66b..73a68751128809a694ac8eaeede95fb58754d2b8 100644 (file)
@@ -13,11 +13,13 @@ Utility to process gossip routing data from Rapid Gossip Sync Server.
 default = ["std"]
 no-std = ["lightning/no-std"]
 std = ["lightning/std"]
-_bench_unstable = []
 
 [dependencies]
 lightning = { version = "0.0.115", path = "../lightning", default-features = false }
 bitcoin = { version = "0.29.0", default-features = false }
 
+[target.'cfg(ldk_bench)'.dependencies]
+criterion = { version = "0.4", optional = true, default-features = false }
+
 [dev-dependencies]
 lightning = { version = "0.0.115", path = "../lightning", features = ["_test_utils"] }
index c8f140cc18955d8225bbf300fcda2aee6ddae267..5a61be7990e2a17270b7c61deeb8944952255735 100644 (file)
 
 #![cfg_attr(all(not(feature = "std"), not(test)), no_std)]
 
-// Allow and import test features for benching
-#![cfg_attr(all(test, feature = "_bench_unstable"), feature(test))]
-#[cfg(all(test, feature = "_bench_unstable"))]
-extern crate test;
+#[cfg(ldk_bench)] extern crate criterion;
 
 #[cfg(not(feature = "std"))]
 extern crate alloc;
@@ -287,36 +284,42 @@ mod tests {
        }
 }
 
-#[cfg(all(test, feature = "_bench_unstable"))]
+#[cfg(ldk_bench)]
+/// Benches
 pub mod bench {
-       use test::Bencher;
-
        use bitcoin::Network;
 
-       use lightning::ln::msgs::DecodeError;
+       use criterion::Criterion;
+
+       use std::fs;
+
        use lightning::routing::gossip::NetworkGraph;
        use lightning::util::test_utils::TestLogger;
 
        use crate::RapidGossipSync;
 
-       #[bench]
-       fn bench_reading_full_graph_from_file(b: &mut Bencher) {
+       /// Bench!
+       pub fn bench_reading_full_graph_from_file(b: &mut Criterion) {
                let logger = TestLogger::new();
-               b.iter(|| {
+               b.bench_function("read_full_graph_from_rgs", |b| b.iter(|| {
                        let network_graph = NetworkGraph::new(Network::Bitcoin, &logger);
                        let rapid_sync = RapidGossipSync::new(&network_graph, &logger);
-                       let sync_result = rapid_sync.sync_network_graph_with_file_path("./res/full_graph.lngossip");
-                       if let Err(crate::error::GraphSyncError::DecodeError(DecodeError::Io(io_error))) = &sync_result {
-                               let error_string = format!("Input file lightning-rapid-gossip-sync/res/full_graph.lngossip is missing! Download it from https://bitcoin.ninja/ldk-compressed_graph-bc08df7542-2022-05-05.bin\n\n{:?}", io_error);
-                               #[cfg(not(require_route_graph_test))]
-                               {
-                                       println!("{}", error_string);
-                                       return;
-                               }
-                               #[cfg(require_route_graph_test)]
-                               panic!("{}", error_string);
-                       }
-                       assert!(sync_result.is_ok())
-               });
+                       let mut file = match fs::read("../lightning-rapid-gossip-sync/res/full_graph.lngossip") {
+                               Ok(f) => f,
+                               Err(io_error) => {
+                                       let error_string = format!(
+                                               "Input file lightning-rapid-gossip-sync/res/full_graph.lngossip is missing! Download it from https://bitcoin.ninja/ldk-compressed_graph-bc08df7542-2022-05-05.bin\n\n{:?}",
+                                               io_error);
+                                       #[cfg(not(require_route_graph_test))]
+                                       {
+                                               println!("{}", error_string);
+                                               return;
+                                       }
+                                       #[cfg(require_route_graph_test)]
+                                       panic!("{}", error_string);
+                               },
+                       };
+                       rapid_sync.update_network_graph_no_std(&mut file, None).unwrap();
+               }));
        }
 }
index 1804be0f1bb50ac26b84c6472a7c1137f5b8ebf8..efaa82d2031ba27e2a40cd930f367780f26efb71 100644 (file)
@@ -28,7 +28,6 @@ max_level_trace = []
 # Allow signing of local transactions that may have been revoked or will be revoked, for functional testing (e.g. justice tx handling).
 # This is unsafe to use in production because it may result in the counterparty publishing a transaction taking our funds.
 unsafe_revoked_tx_signing = []
-_bench_unstable = []
 # Override signing to not include randomness when generating signatures for test vectors.
 _test_vectors = []
 
@@ -59,5 +58,8 @@ version = "0.29.0"
 default-features = false
 features = ["bitcoinconsensus", "secp-recovery"]
 
+[target.'cfg(ldk_bench)'.dependencies]
+criterion = { version = "0.4", optional = true, default-features = false }
+
 [target.'cfg(taproot)'.dependencies]
 musig2 = { git = "https://github.com/arik-so/rust-musig2", rev = "27797d7" }
index e923a94bb6d3e7afb5f37376b6de132225821d21..d875dcce3e128c1443a2deaa46f1a8465a7cd06b 100644 (file)
@@ -19,8 +19,20 @@ use bitcoin::blockdata::transaction::Transaction;
 
 /// An interface to send a transaction to the Bitcoin network.
 pub trait BroadcasterInterface {
-       /// Sends a transaction out to (hopefully) be mined.
-       fn broadcast_transaction(&self, tx: &Transaction);
+       /// Sends a list of transactions out to (hopefully) be mined.
+       /// This only needs to handle the actual broadcasting of transactions; LDK will automatically
+       /// rebroadcast transactions that haven't made it into a block.
+       ///
+       /// In some cases LDK may attempt to broadcast a transaction which double-spends another;
+       /// this isn't a bug and can be safely ignored.
+       ///
+       /// If more than one transaction is given, these transactions should be considered to be a
+       /// package and broadcast together. Some of the transactions may or may not depend on each
+       /// other; be sure to handle both cases correctly.
+       ///
+       /// Bitcoin transaction packages are defined in BIP 331 and here:
+       /// https://github.com/bitcoin/bitcoin/blob/master/doc/policy/packages.md
+       fn broadcast_transactions(&self, txs: &[&Transaction]);
 }
 
 /// An enum that represents the speed at which we want a transaction to confirm used for feerate
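
Since the trait now takes a slice, implementors receive a whole package at once. A minimal sketch of an implementation honoring that contract; `QueueingBroadcaster` is hypothetical and just queues each package intact for some other task to submit to a node:

    use bitcoin::blockdata::transaction::Transaction;
    use lightning::chain::chaininterface::BroadcasterInterface;
    use std::sync::Mutex;

    struct QueueingBroadcaster {
        queued_packages: Mutex<Vec<Vec<Transaction>>>,
    }

    impl BroadcasterInterface for QueueingBroadcaster {
        fn broadcast_transactions(&self, txs: &[&Transaction]) {
            // Keep the slice together as one package so dependent transactions are never split up.
            let package: Vec<Transaction> = txs.iter().map(|tx| (*tx).clone()).collect();
            self.queued_packages.lock().unwrap().push(package);
        }
    }
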
index 37a497005d9948dc531ee7c53c277d927a6fc33c..261e5593b5b4600ad661142a4579308b68903316 100644 (file)
@@ -825,8 +825,6 @@ impl<ChannelSigner: WriteableEcdsaChannelSigner, C: Deref, T: Deref, F: Deref, L
 
 #[cfg(test)]
 mod tests {
-       use bitcoin::{BlockHeader, TxMerkleNode};
-       use bitcoin::hashes::Hash;
        use crate::{check_added_monitors, check_closed_broadcast, check_closed_event};
        use crate::{expect_payment_sent, expect_payment_claimed, expect_payment_sent_without_paths, expect_payment_path_successful, get_event_msg};
        use crate::{get_htlc_update_msgs, get_local_commitment_txn, get_revoke_commit_msgs, get_route_and_payment_hash, unwrap_send_err};
@@ -972,10 +970,7 @@ mod tests {
 
                // Connect B's commitment transaction, but only to the ChainMonitor/ChannelMonitor. The
                // channel is now closed, but the ChannelManager doesn't know that yet.
-               let new_header = BlockHeader {
-                       version: 2, time: 0, bits: 0, nonce: 0,
-                       prev_blockhash: nodes[0].best_block_info().0,
-                       merkle_root: TxMerkleNode::all_zeros() };
+               let new_header = create_dummy_header(nodes[0].best_block_info().0, 0);
                nodes[0].chain_monitor.chain_monitor.transactions_confirmed(&new_header,
                        &[(0, &remote_txn[0]), (1, &remote_txn[1])], nodes[0].best_block_info().1 + 1);
                assert!(nodes[0].chain_monitor.release_pending_monitor_events().is_empty());
@@ -999,10 +994,7 @@ mod tests {
 
                if block_timeout {
                        // After three blocks, pending MonitorEvents should be released either way.
-                       let latest_header = BlockHeader {
-                               version: 2, time: 0, bits: 0, nonce: 0,
-                               prev_blockhash: nodes[0].best_block_info().0,
-                               merkle_root: TxMerkleNode::all_zeros() };
+                       let latest_header = create_dummy_header(nodes[0].best_block_info().0, 0);
                        nodes[0].chain_monitor.chain_monitor.best_block_updated(&latest_header, nodes[0].best_block_info().1 + LATENCY_GRACE_PERIOD_BLOCKS);
                } else {
                        let persistences = chanmon_cfgs[0].persister.chain_sync_monitor_persistences.lock().unwrap().clone();
index e3be6bff2fa88b4191d9b3fdc8bccf665d4ac580..a48f169a4d5e605b330f49896bc191779dd389d7 100644 (file)
@@ -2327,10 +2327,13 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
                where B::Target: BroadcasterInterface,
                                        L::Target: Logger,
        {
-               for tx in self.get_latest_holder_commitment_txn(logger).iter() {
+               let commit_txs = self.get_latest_holder_commitment_txn(logger);
+               let mut txs = vec![];
+               for tx in commit_txs.iter() {
                        log_info!(logger, "Broadcasting local {}", log_tx!(tx));
-                       broadcaster.broadcast_transaction(tx);
+                       txs.push(tx);
                }
+               broadcaster.broadcast_transactions(&txs);
                self.pending_monitor_events.push(MonitorEvent::CommitmentTxConfirmed(self.funding_info.0));
        }
 
@@ -2431,7 +2434,7 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
                                                        let commitment_package = PackageTemplate::build_package(
                                                                self.funding_info.0.txid.clone(), self.funding_info.0.index as u32,
                                                                PackageSolvingData::HolderFundingOutput(funding_output),
-                                                               best_block_height, false, best_block_height,
+                                                               best_block_height, best_block_height
                                                        );
                                                        self.onchain_tx_handler.update_claims_view_from_requests(
                                                                vec![commitment_package], best_block_height, best_block_height,
@@ -2614,8 +2617,8 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
                        // First, process non-htlc outputs (to_holder & to_counterparty)
                        for (idx, outp) in tx.output.iter().enumerate() {
                                if outp.script_pubkey == revokeable_p2wsh {
-                                       let revk_outp = RevokedOutput::build(per_commitment_point, self.counterparty_commitment_params.counterparty_delayed_payment_base_key, self.counterparty_commitment_params.counterparty_htlc_base_key, per_commitment_key, outp.value, self.counterparty_commitment_params.on_counterparty_tx_csv);
-                                       let justice_package = PackageTemplate::build_package(commitment_txid, idx as u32, PackageSolvingData::RevokedOutput(revk_outp), height + self.counterparty_commitment_params.on_counterparty_tx_csv as u32, true, height);
+                                       let revk_outp = RevokedOutput::build(per_commitment_point, self.counterparty_commitment_params.counterparty_delayed_payment_base_key, self.counterparty_commitment_params.counterparty_htlc_base_key, per_commitment_key, outp.value, self.counterparty_commitment_params.on_counterparty_tx_csv, self.onchain_tx_handler.opt_anchors());
+                                       let justice_package = PackageTemplate::build_package(commitment_txid, idx as u32, PackageSolvingData::RevokedOutput(revk_outp), height + self.counterparty_commitment_params.on_counterparty_tx_csv as u32, height);
                                        claimable_outpoints.push(justice_package);
                                        to_counterparty_output_info =
                                                Some((idx.try_into().expect("Txn can't have more than 2^32 outputs"), outp.value));
@@ -2633,7 +2636,7 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
                                                                to_counterparty_output_info);
                                                }
                                                let revk_htlc_outp = RevokedHTLCOutput::build(per_commitment_point, self.counterparty_commitment_params.counterparty_delayed_payment_base_key, self.counterparty_commitment_params.counterparty_htlc_base_key, per_commitment_key, htlc.amount_msat / 1000, htlc.clone(), self.onchain_tx_handler.channel_transaction_parameters.opt_anchors.is_some());
-                                               let justice_package = PackageTemplate::build_package(commitment_txid, transaction_output_index, PackageSolvingData::RevokedHTLCOutput(revk_htlc_outp), htlc.cltv_expiry, true, height);
+                                               let justice_package = PackageTemplate::build_package(commitment_txid, transaction_output_index, PackageSolvingData::RevokedHTLCOutput(revk_htlc_outp), htlc.cltv_expiry, height);
                                                claimable_outpoints.push(justice_package);
                                        }
                                }
@@ -2758,8 +2761,7 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
                                                                self.counterparty_commitment_params.counterparty_htlc_base_key,
                                                                htlc.clone(), self.onchain_tx_handler.opt_anchors()))
                                        };
-                                       let aggregation = if !htlc.offered { false } else { true };
-                                       let counterparty_package = PackageTemplate::build_package(commitment_txid, transaction_output_index, counterparty_htlc_outp, htlc.cltv_expiry,aggregation, 0);
+                                       let counterparty_package = PackageTemplate::build_package(commitment_txid, transaction_output_index, counterparty_htlc_outp, htlc.cltv_expiry, 0);
                                        claimable_outpoints.push(counterparty_package);
                                }
                        }
@@ -2798,11 +2800,12 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
                                let revk_outp = RevokedOutput::build(
                                        per_commitment_point, self.counterparty_commitment_params.counterparty_delayed_payment_base_key,
                                        self.counterparty_commitment_params.counterparty_htlc_base_key, per_commitment_key,
-                                       tx.output[idx].value, self.counterparty_commitment_params.on_counterparty_tx_csv
+                                       tx.output[idx].value, self.counterparty_commitment_params.on_counterparty_tx_csv,
+                                       false
                                );
                                let justice_package = PackageTemplate::build_package(
                                        htlc_txid, idx as u32, PackageSolvingData::RevokedOutput(revk_outp),
-                                       height + self.counterparty_commitment_params.on_counterparty_tx_csv as u32, true, height
+                                       height + self.counterparty_commitment_params.on_counterparty_tx_csv as u32, height
                                );
                                claimable_outpoints.push(justice_package);
                                if outputs_to_watch.is_none() {
@@ -2825,11 +2828,11 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
 
                for &(ref htlc, _, _) in holder_tx.htlc_outputs.iter() {
                        if let Some(transaction_output_index) = htlc.transaction_output_index {
-                               let (htlc_output, aggregable) = if htlc.offered {
+                               let htlc_output = if htlc.offered {
                                        let htlc_output = HolderHTLCOutput::build_offered(
                                                htlc.amount_msat, htlc.cltv_expiry, self.onchain_tx_handler.opt_anchors()
                                        );
-                                       (htlc_output, false)
+                                       htlc_output
                                } else {
                                        let payment_preimage = if let Some(preimage) = self.payment_preimages.get(&htlc.payment_hash) {
                                                preimage.clone()
@@ -2840,12 +2843,12 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
                                        let htlc_output = HolderHTLCOutput::build_accepted(
                                                payment_preimage, htlc.amount_msat, self.onchain_tx_handler.opt_anchors()
                                        );
-                                       (htlc_output, self.onchain_tx_handler.opt_anchors())
+                                       htlc_output
                                };
                                let htlc_package = PackageTemplate::build_package(
                                        holder_tx.txid, transaction_output_index,
                                        PackageSolvingData::HolderHTLCOutput(htlc_output),
-                                       htlc.cltv_expiry, aggregable, conf_height
+                                       htlc.cltv_expiry, conf_height
                                );
                                claim_requests.push(htlc_package);
                        }
@@ -3185,7 +3188,7 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
                let should_broadcast = self.should_broadcast_holder_commitment_txn(logger);
                if should_broadcast {
                        let funding_outp = HolderFundingOutput::build(self.funding_redeemscript.clone(), self.channel_value_satoshis, self.onchain_tx_handler.opt_anchors());
-                       let commitment_package = PackageTemplate::build_package(self.funding_info.0.txid.clone(), self.funding_info.0.index as u32, PackageSolvingData::HolderFundingOutput(funding_outp), self.best_block.height(), false, self.best_block.height());
+                       let commitment_package = PackageTemplate::build_package(self.funding_info.0.txid.clone(), self.funding_info.0.index as u32, PackageSolvingData::HolderFundingOutput(funding_outp), self.best_block.height(), self.best_block.height());
                        claimable_outpoints.push(commitment_package);
                        self.pending_monitor_events.push(MonitorEvent::CommitmentTxConfirmed(self.funding_info.0));
                        let commitment_tx = self.onchain_tx_handler.get_fully_signed_holder_tx(&self.funding_redeemscript);
@@ -4084,7 +4087,6 @@ impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP
 
 #[cfg(test)]
 mod tests {
-       use bitcoin::blockdata::block::BlockHeader;
        use bitcoin::blockdata::script::{Script, Builder};
        use bitcoin::blockdata::opcodes;
        use bitcoin::blockdata::transaction::{Transaction, TxIn, TxOut, EcdsaSighashType};
@@ -4121,7 +4123,7 @@ mod tests {
        use crate::util::ser::{ReadableArgs, Writeable};
        use crate::sync::{Arc, Mutex};
        use crate::io;
-       use bitcoin::{PackedLockTime, Sequence, TxMerkleNode, Witness};
+       use bitcoin::{PackedLockTime, Sequence, Witness};
        use crate::prelude::*;
 
        fn do_test_funding_spend_refuses_updates(use_local_txn: bool) {
@@ -4160,10 +4162,7 @@ mod tests {
 
                // Connect a commitment transaction, but only to the ChainMonitor/ChannelMonitor. The
                // channel is now closed, but the ChannelManager doesn't know that yet.
-               let new_header = BlockHeader {
-                       version: 2, time: 0, bits: 0, nonce: 0,
-                       prev_blockhash: nodes[0].best_block_info().0,
-                       merkle_root: TxMerkleNode::all_zeros() };
+               let new_header = create_dummy_header(nodes[0].best_block_info().0, 0);
                let conf_height = nodes[0].best_block_info().1 + 1;
                nodes[1].chain_monitor.chain_monitor.transactions_confirmed(&new_header,
                        &[(0, broadcast_tx)], conf_height);
index 21b4717e1d9786dfd2b0eb68092e41f7b3d19afd..45968c57e537077c2d583a24fe9702300d19a8cb 100644 (file)
@@ -513,7 +513,7 @@ impl<ChannelSigner: WriteableEcdsaChannelSigner> OnchainTxHandler<ChannelSigner>
                                                OnchainClaim::Tx(tx) => {
                                                        let log_start = if bumped_feerate { "Broadcasting RBF-bumped" } else { "Rebroadcasting" };
                                                        log_info!(logger, "{} onchain {}", log_start, log_tx!(tx));
-                                                       broadcaster.broadcast_transaction(&tx);
+                                                       broadcaster.broadcast_transactions(&[&tx]);
                                                },
                                                #[cfg(anchors)]
                                                OnchainClaim::Event(event) => {
@@ -767,7 +767,7 @@ impl<ChannelSigner: WriteableEcdsaChannelSigner> OnchainTxHandler<ChannelSigner>
                                let package_id = match claim {
                                        OnchainClaim::Tx(tx) => {
                                                log_info!(logger, "Broadcasting onchain {}", log_tx!(tx));
-                                               broadcaster.broadcast_transaction(&tx);
+                                               broadcaster.broadcast_transactions(&[&tx]);
                                                tx.txid().into_inner()
                                        },
                                        #[cfg(anchors)]
@@ -960,7 +960,7 @@ impl<ChannelSigner: WriteableEcdsaChannelSigner> OnchainTxHandler<ChannelSigner>
                                match bump_claim {
                                        OnchainClaim::Tx(bump_tx) => {
                                                log_info!(logger, "Broadcasting RBF-bumped onchain {}", log_tx!(bump_tx));
-                                               broadcaster.broadcast_transaction(&bump_tx);
+                                               broadcaster.broadcast_transactions(&[&bump_tx]);
                                        },
                                        #[cfg(anchors)]
                                        OnchainClaim::Event(claim_event) => {
@@ -1046,7 +1046,7 @@ impl<ChannelSigner: WriteableEcdsaChannelSigner> OnchainTxHandler<ChannelSigner>
                                match bump_claim {
                                        OnchainClaim::Tx(bump_tx) => {
                                                log_info!(logger, "Broadcasting onchain {}", log_tx!(bump_tx));
-                                               broadcaster.broadcast_transaction(&bump_tx);
+                                               broadcaster.broadcast_transactions(&[&bump_tx]);
                                        },
                                        #[cfg(anchors)]
                                        OnchainClaim::Event(claim_event) => {
index e66092222d83ad4b583db839ea08e94da0751ce4..4604a164cd634534169e46f0df40670f346172c3 100644 (file)
@@ -98,10 +98,11 @@ pub(crate) struct RevokedOutput {
        weight: u64,
        amount: u64,
        on_counterparty_tx_csv: u16,
+       is_counterparty_balance_on_anchors: Option<()>,
 }
 
 impl RevokedOutput {
-       pub(crate) fn build(per_commitment_point: PublicKey, counterparty_delayed_payment_base_key: PublicKey, counterparty_htlc_base_key: PublicKey, per_commitment_key: SecretKey, amount: u64, on_counterparty_tx_csv: u16) -> Self {
+       pub(crate) fn build(per_commitment_point: PublicKey, counterparty_delayed_payment_base_key: PublicKey, counterparty_htlc_base_key: PublicKey, per_commitment_key: SecretKey, amount: u64, on_counterparty_tx_csv: u16, is_counterparty_balance_on_anchors: bool) -> Self {
                RevokedOutput {
                        per_commitment_point,
                        counterparty_delayed_payment_base_key,
@@ -109,7 +110,8 @@ impl RevokedOutput {
                        per_commitment_key,
                        weight: WEIGHT_REVOKED_OUTPUT,
                        amount,
-                       on_counterparty_tx_csv
+                       on_counterparty_tx_csv,
+                       is_counterparty_balance_on_anchors: if is_counterparty_balance_on_anchors { Some(()) } else { None }
                }
        }
 }
@@ -122,6 +124,7 @@ impl_writeable_tlv_based!(RevokedOutput, {
        (8, weight, required),
        (10, amount, required),
        (12, on_counterparty_tx_csv, required),
+       (14, is_counterparty_balance_on_anchors, option)
 });
 
 /// A struct to describe a revoked offered output and corresponding information to generate a
@@ -479,6 +482,24 @@ impl PackageSolvingData {
                };
                absolute_timelock
        }
+
+       fn map_output_type_flags(&self) -> (PackageMalleability, bool) {
+               // Post-anchor, aggregation of outputs of different types is unsafe. See https://github.com/lightning/bolts/pull/803.
+               let (malleability, aggregable) = match self {
+                       PackageSolvingData::RevokedOutput(RevokedOutput { is_counterparty_balance_on_anchors: Some(()), .. }) => { (PackageMalleability::Malleable, false) },
+                       PackageSolvingData::RevokedOutput(RevokedOutput { is_counterparty_balance_on_anchors: None, .. }) => { (PackageMalleability::Malleable, true) },
+                       PackageSolvingData::RevokedHTLCOutput(..) => { (PackageMalleability::Malleable, true) },
+                       PackageSolvingData::CounterpartyOfferedHTLCOutput(..) => { (PackageMalleability::Malleable, true) },
+                       PackageSolvingData::CounterpartyReceivedHTLCOutput(..) => { (PackageMalleability::Malleable, false) },
+                       PackageSolvingData::HolderHTLCOutput(ref outp) => if outp.opt_anchors() {
+                               (PackageMalleability::Malleable, outp.preimage.is_some())
+                       } else {
+                               (PackageMalleability::Untractable, false)
+                       },
+                       PackageSolvingData::HolderFundingOutput(..) => { (PackageMalleability::Untractable, false) },
+               };
+               (malleability, aggregable)
+       }
 }
 
 impl_writeable_tlv_based_enum!(PackageSolvingData, ;
@@ -491,8 +512,7 @@ impl_writeable_tlv_based_enum!(PackageSolvingData, ;
 );
 
 /// A malleable package might be aggregated with other packages to save on fees.
-/// A untractable package has been counter-signed and aggregable will break cached counterparty
-/// signatures.
+/// An untractable package has been counter-signed and aggregating it will break cached counterparty signatures.
 #[derive(Clone, PartialEq, Eq)]
 pub(crate) enum PackageMalleability {
        Malleable,
@@ -826,19 +846,8 @@ impl PackageTemplate {
                }).is_some()
        }
 
-       pub (crate) fn build_package(txid: Txid, vout: u32, input_solving_data: PackageSolvingData, soonest_conf_deadline: u32, aggregable: bool, height_original: u32) -> Self {
-               let malleability = match input_solving_data {
-                       PackageSolvingData::RevokedOutput(..) => PackageMalleability::Malleable,
-                       PackageSolvingData::RevokedHTLCOutput(..) => PackageMalleability::Malleable,
-                       PackageSolvingData::CounterpartyOfferedHTLCOutput(..) => PackageMalleability::Malleable,
-                       PackageSolvingData::CounterpartyReceivedHTLCOutput(..) => PackageMalleability::Malleable,
-                       PackageSolvingData::HolderHTLCOutput(ref outp) => if outp.opt_anchors() {
-                               PackageMalleability::Malleable
-                       } else {
-                               PackageMalleability::Untractable
-                       },
-                       PackageSolvingData::HolderFundingOutput(..) => PackageMalleability::Untractable,
-               };
+       pub (crate) fn build_package(txid: Txid, vout: u32, input_solving_data: PackageSolvingData, soonest_conf_deadline: u32, height_original: u32) -> Self {
+               let (malleability, aggregable) = PackageSolvingData::map_output_type_flags(&input_solving_data);
                let mut inputs = Vec::with_capacity(1);
                inputs.push((BitcoinOutPoint { txid, vout }, input_solving_data));
                PackageTemplate {
@@ -880,18 +889,7 @@ impl Readable for PackageTemplate {
                        inputs.push((outpoint, rev_outp));
                }
                let (malleability, aggregable) = if let Some((_, lead_input)) = inputs.first() {
-                       match lead_input {
-                               PackageSolvingData::RevokedOutput(..) => { (PackageMalleability::Malleable, true) },
-                               PackageSolvingData::RevokedHTLCOutput(..) => { (PackageMalleability::Malleable, true) },
-                               PackageSolvingData::CounterpartyOfferedHTLCOutput(..) => { (PackageMalleability::Malleable, true) },
-                               PackageSolvingData::CounterpartyReceivedHTLCOutput(..) => { (PackageMalleability::Malleable, false) },
-                               PackageSolvingData::HolderHTLCOutput(ref outp) => if outp.opt_anchors() {
-                                       (PackageMalleability::Malleable, outp.preimage.is_some())
-                               } else {
-                                       (PackageMalleability::Untractable, false)
-                               },
-                               PackageSolvingData::HolderFundingOutput(..) => { (PackageMalleability::Untractable, false) },
-                       }
+                       PackageSolvingData::map_output_type_flags(&lead_input)
                } else { return Err(DecodeError::InvalidValue); };
                let mut soonest_conf_deadline = 0;
                let mut feerate_previous = 0;
@@ -1029,11 +1027,11 @@ mod tests {
        use bitcoin::secp256k1::Secp256k1;
 
        macro_rules! dumb_revk_output {
-               ($secp_ctx: expr) => {
+               ($secp_ctx: expr, $is_counterparty_balance_on_anchors: expr) => {
                        {
                                let dumb_scalar = SecretKey::from_slice(&hex::decode("0101010101010101010101010101010101010101010101010101010101010101").unwrap()[..]).unwrap();
                                let dumb_point = PublicKey::from_secret_key(&$secp_ctx, &dumb_scalar);
-                               PackageSolvingData::RevokedOutput(RevokedOutput::build(dumb_point, dumb_point, dumb_point, dumb_scalar, 0, 0))
+                               PackageSolvingData::RevokedOutput(RevokedOutput::build(dumb_point, dumb_point, dumb_point, dumb_scalar, 0, 0, $is_counterparty_balance_on_anchors))
                        }
                }
        }
@@ -1077,10 +1075,10 @@ mod tests {
        fn test_package_differing_heights() {
                let txid = Txid::from_hex("c2d4449afa8d26140898dd54d3390b057ba2a5afcf03ba29d7dc0d8b9ffe966e").unwrap();
                let secp_ctx = Secp256k1::new();
-               let revk_outp = dumb_revk_output!(secp_ctx);
+               let revk_outp = dumb_revk_output!(secp_ctx, false);
 
-               let mut package_one_hundred = PackageTemplate::build_package(txid, 0, revk_outp.clone(), 1000, true, 100);
-               let package_two_hundred = PackageTemplate::build_package(txid, 1, revk_outp.clone(), 1000, true, 200);
+               let mut package_one_hundred = PackageTemplate::build_package(txid, 0, revk_outp.clone(), 1000, 100);
+               let package_two_hundred = PackageTemplate::build_package(txid, 1, revk_outp.clone(), 1000, 200);
                package_one_hundred.merge_package(package_two_hundred);
        }
 
@@ -1089,11 +1087,11 @@ mod tests {
        fn test_package_untractable_merge_to() {
                let txid = Txid::from_hex("c2d4449afa8d26140898dd54d3390b057ba2a5afcf03ba29d7dc0d8b9ffe966e").unwrap();
                let secp_ctx = Secp256k1::new();
-               let revk_outp = dumb_revk_output!(secp_ctx);
+               let revk_outp = dumb_revk_output!(secp_ctx, false);
                let htlc_outp = dumb_htlc_output!();
 
-               let mut untractable_package = PackageTemplate::build_package(txid, 0, revk_outp.clone(), 1000, true, 100);
-               let malleable_package = PackageTemplate::build_package(txid, 1, htlc_outp.clone(), 1000, true, 100);
+               let mut untractable_package = PackageTemplate::build_package(txid, 0, revk_outp.clone(), 1000, 100);
+               let malleable_package = PackageTemplate::build_package(txid, 1, htlc_outp.clone(), 1000, 100);
                untractable_package.merge_package(malleable_package);
        }
 
@@ -1103,10 +1101,10 @@ mod tests {
                let txid = Txid::from_hex("c2d4449afa8d26140898dd54d3390b057ba2a5afcf03ba29d7dc0d8b9ffe966e").unwrap();
                let secp_ctx = Secp256k1::new();
                let htlc_outp = dumb_htlc_output!();
-               let revk_outp = dumb_revk_output!(secp_ctx);
+               let revk_outp = dumb_revk_output!(secp_ctx, false);
 
-               let mut malleable_package = PackageTemplate::build_package(txid, 0, htlc_outp.clone(), 1000, true, 100);
-               let untractable_package = PackageTemplate::build_package(txid, 1, revk_outp.clone(), 1000, true, 100);
+               let mut malleable_package = PackageTemplate::build_package(txid, 0, htlc_outp.clone(), 1000, 100);
+               let untractable_package = PackageTemplate::build_package(txid, 1, revk_outp.clone(), 1000, 100);
                malleable_package.merge_package(untractable_package);
        }
 
@@ -1115,10 +1113,11 @@ mod tests {
        fn test_package_noaggregation_to() {
                let txid = Txid::from_hex("c2d4449afa8d26140898dd54d3390b057ba2a5afcf03ba29d7dc0d8b9ffe966e").unwrap();
                let secp_ctx = Secp256k1::new();
-               let revk_outp = dumb_revk_output!(secp_ctx);
+               let revk_outp = dumb_revk_output!(secp_ctx, false);
+               let revk_outp_counterparty_balance = dumb_revk_output!(secp_ctx, true);
 
-               let mut noaggregation_package = PackageTemplate::build_package(txid, 0, revk_outp.clone(), 1000, false, 100);
-               let aggregation_package = PackageTemplate::build_package(txid, 1, revk_outp.clone(), 1000, true, 100);
+               let mut noaggregation_package = PackageTemplate::build_package(txid, 0, revk_outp_counterparty_balance.clone(), 1000, 100);
+               let aggregation_package = PackageTemplate::build_package(txid, 1, revk_outp.clone(), 1000, 100);
                noaggregation_package.merge_package(aggregation_package);
        }
 
@@ -1127,10 +1126,11 @@ mod tests {
        fn test_package_noaggregation_from() {
                let txid = Txid::from_hex("c2d4449afa8d26140898dd54d3390b057ba2a5afcf03ba29d7dc0d8b9ffe966e").unwrap();
                let secp_ctx = Secp256k1::new();
-               let revk_outp = dumb_revk_output!(secp_ctx);
+               let revk_outp = dumb_revk_output!(secp_ctx, false);
+               let revk_outp_counterparty_balance = dumb_revk_output!(secp_ctx, true);
 
-               let mut aggregation_package = PackageTemplate::build_package(txid, 0, revk_outp.clone(), 1000, true, 100);
-               let noaggregation_package = PackageTemplate::build_package(txid, 1, revk_outp.clone(), 1000, false, 100);
+               let mut aggregation_package = PackageTemplate::build_package(txid, 0, revk_outp.clone(), 1000, 100);
+               let noaggregation_package = PackageTemplate::build_package(txid, 1, revk_outp_counterparty_balance.clone(), 1000, 100);
                aggregation_package.merge_package(noaggregation_package);
        }
 
@@ -1139,11 +1139,11 @@ mod tests {
        fn test_package_empty() {
                let txid = Txid::from_hex("c2d4449afa8d26140898dd54d3390b057ba2a5afcf03ba29d7dc0d8b9ffe966e").unwrap();
                let secp_ctx = Secp256k1::new();
-               let revk_outp = dumb_revk_output!(secp_ctx);
+               let revk_outp = dumb_revk_output!(secp_ctx, false);
 
-               let mut empty_package = PackageTemplate::build_package(txid, 0, revk_outp.clone(), 1000, true, 100);
+               let mut empty_package = PackageTemplate::build_package(txid, 0, revk_outp.clone(), 1000, 100);
                empty_package.inputs = vec![];
-               let package = PackageTemplate::build_package(txid, 1, revk_outp.clone(), 1000, true, 100);
+               let package = PackageTemplate::build_package(txid, 1, revk_outp.clone(), 1000, 100);
                empty_package.merge_package(package);
        }
 
@@ -1152,11 +1152,11 @@ mod tests {
        fn test_package_differing_categories() {
                let txid = Txid::from_hex("c2d4449afa8d26140898dd54d3390b057ba2a5afcf03ba29d7dc0d8b9ffe966e").unwrap();
                let secp_ctx = Secp256k1::new();
-               let revk_outp = dumb_revk_output!(secp_ctx);
+               let revk_outp = dumb_revk_output!(secp_ctx, false);
                let counterparty_outp = dumb_counterparty_output!(secp_ctx, 0, false);
 
-               let mut revoked_package = PackageTemplate::build_package(txid, 0, revk_outp, 1000, true, 100);
-               let counterparty_package = PackageTemplate::build_package(txid, 1, counterparty_outp, 1000, true, 100);
+               let mut revoked_package = PackageTemplate::build_package(txid, 0, revk_outp, 1000, 100);
+               let counterparty_package = PackageTemplate::build_package(txid, 1, counterparty_outp, 1000, 100);
                revoked_package.merge_package(counterparty_package);
        }
 
@@ -1164,13 +1164,13 @@ mod tests {
        fn test_package_split_malleable() {
                let txid = Txid::from_hex("c2d4449afa8d26140898dd54d3390b057ba2a5afcf03ba29d7dc0d8b9ffe966e").unwrap();
                let secp_ctx = Secp256k1::new();
-               let revk_outp_one = dumb_revk_output!(secp_ctx);
-               let revk_outp_two = dumb_revk_output!(secp_ctx);
-               let revk_outp_three = dumb_revk_output!(secp_ctx);
+               let revk_outp_one = dumb_revk_output!(secp_ctx, false);
+               let revk_outp_two = dumb_revk_output!(secp_ctx, false);
+               let revk_outp_three = dumb_revk_output!(secp_ctx, false);
 
-               let mut package_one = PackageTemplate::build_package(txid, 0, revk_outp_one, 1000, true, 100);
-               let package_two = PackageTemplate::build_package(txid, 1, revk_outp_two, 1000, true, 100);
-               let package_three = PackageTemplate::build_package(txid, 2, revk_outp_three, 1000, true, 100);
+               let mut package_one = PackageTemplate::build_package(txid, 0, revk_outp_one, 1000, 100);
+               let package_two = PackageTemplate::build_package(txid, 1, revk_outp_two, 1000, 100);
+               let package_three = PackageTemplate::build_package(txid, 2, revk_outp_three, 1000, 100);
 
                package_one.merge_package(package_two);
                package_one.merge_package(package_three);
@@ -1193,7 +1193,7 @@ mod tests {
                let txid = Txid::from_hex("c2d4449afa8d26140898dd54d3390b057ba2a5afcf03ba29d7dc0d8b9ffe966e").unwrap();
                let htlc_outp_one = dumb_htlc_output!();
 
-               let mut package_one = PackageTemplate::build_package(txid, 0, htlc_outp_one, 1000, true, 100);
+               let mut package_one = PackageTemplate::build_package(txid, 0, htlc_outp_one, 1000, 100);
                let ret_split = package_one.split_package(&BitcoinOutPoint { txid, vout: 0});
                assert!(ret_split.is_none());
        }
@@ -1202,9 +1202,9 @@ mod tests {
        fn test_package_timer() {
                let txid = Txid::from_hex("c2d4449afa8d26140898dd54d3390b057ba2a5afcf03ba29d7dc0d8b9ffe966e").unwrap();
                let secp_ctx = Secp256k1::new();
-               let revk_outp = dumb_revk_output!(secp_ctx);
+               let revk_outp = dumb_revk_output!(secp_ctx, false);
 
-               let mut package = PackageTemplate::build_package(txid, 0, revk_outp, 1000, true, 100);
+               let mut package = PackageTemplate::build_package(txid, 0, revk_outp, 1000, 100);
                assert_eq!(package.timer(), 100);
                package.set_timer(101);
                assert_eq!(package.timer(), 101);
@@ -1216,7 +1216,7 @@ mod tests {
                let secp_ctx = Secp256k1::new();
                let counterparty_outp = dumb_counterparty_output!(secp_ctx, 1_000_000, false);
 
-               let package = PackageTemplate::build_package(txid, 0, counterparty_outp, 1000, true, 100);
+               let package = PackageTemplate::build_package(txid, 0, counterparty_outp, 1000, 100);
                assert_eq!(package.package_amount(), 1000);
        }
 
@@ -1229,15 +1229,15 @@ mod tests {
                let weight_sans_output = (4 + 4 + 1 + 36 + 4 + 1 + 1 + 8 + 1) * WITNESS_SCALE_FACTOR + 2;
 
                {
-                       let revk_outp = dumb_revk_output!(secp_ctx);
-                       let package = PackageTemplate::build_package(txid, 0, revk_outp, 0, true, 100);
+                       let revk_outp = dumb_revk_output!(secp_ctx, false);
+                       let package = PackageTemplate::build_package(txid, 0, revk_outp, 0, 100);
                        assert_eq!(package.package_weight(&Script::new()),  weight_sans_output + WEIGHT_REVOKED_OUTPUT as usize);
                }
 
                {
                        for &opt_anchors in [false, true].iter() {
                                let counterparty_outp = dumb_counterparty_output!(secp_ctx, 1_000_000, opt_anchors);
-                               let package = PackageTemplate::build_package(txid, 0, counterparty_outp, 1000, true, 100);
+                               let package = PackageTemplate::build_package(txid, 0, counterparty_outp, 1000, 100);
                                assert_eq!(package.package_weight(&Script::new()), weight_sans_output + weight_received_htlc(opt_anchors) as usize);
                        }
                }
@@ -1245,7 +1245,7 @@ mod tests {
                {
                        for &opt_anchors in [false, true].iter() {
                                let counterparty_outp = dumb_counterparty_offered_output!(secp_ctx, 1_000_000, opt_anchors);
-                               let package = PackageTemplate::build_package(txid, 0, counterparty_outp, 1000, true, 100);
+                               let package = PackageTemplate::build_package(txid, 0, counterparty_outp, 1000, 100);
                                assert_eq!(package.package_weight(&Script::new()), weight_sans_output + weight_offered_htlc(opt_anchors) as usize);
                        }
                }
index e7e7e0ede6bb137a802cfdd016bf86c0fa4df5f2..cea15b21ad2fba1b6f1937b030d59551bede682c 100644 (file)
@@ -54,9 +54,6 @@
 
 #![cfg_attr(all(not(feature = "std"), not(test)), no_std)]
 
-#![cfg_attr(all(any(test, feature = "_test_utils"), feature = "_bench_unstable"), feature(test))]
-#[cfg(all(any(test, feature = "_test_utils"), feature = "_bench_unstable"))] extern crate test;
-
 #[cfg(not(any(feature = "std", feature = "no-std")))]
 compile_error!("at least one of the `std` or `no-std` features must be enabled");
 
@@ -74,6 +71,8 @@ extern crate core;
 
 #[cfg(not(feature = "std"))] extern crate core2;
 
+#[cfg(ldk_bench)] extern crate criterion;
+
 #[macro_use]
 pub mod util;
 pub mod chain;
@@ -177,7 +176,7 @@ mod prelude {
        pub use alloc::string::ToString;
 }
 
-#[cfg(all(not(feature = "_bench_unstable"), feature = "backtrace", feature = "std", test))]
+#[cfg(all(not(ldk_bench), feature = "backtrace", feature = "std", test))]
 extern crate backtrace;
 
 mod sync;
index d495910eed10242917f0c7ad713896fe7a5eaa02..e59cf47f17600963c9852389fc5723d8189816ef 100644 (file)
@@ -12,7 +12,6 @@
 //! There are a bunch of these as their handling is relatively error-prone so they are split out
 //! here. See also the chanmon_fail_consistency fuzz test.
 
-use bitcoin::blockdata::block::{Block, BlockHeader};
 use bitcoin::blockdata::constants::genesis_block;
 use bitcoin::hash_types::BlockHash;
 use bitcoin::network::constants::Network;
@@ -35,7 +34,6 @@ use crate::util::test_utils;
 
 use crate::io;
 use bitcoin::hashes::Hash;
-use bitcoin::TxMerkleNode;
 use crate::prelude::*;
 use crate::sync::{Arc, Mutex};
 
@@ -121,15 +119,7 @@ fn test_monitor_and_persister_update_fail() {
                assert_eq!(chain_mon.watch_channel(outpoint, new_monitor), ChannelMonitorUpdateStatus::Completed);
                chain_mon
        };
-       let header = BlockHeader {
-               version: 0x20000000,
-               prev_blockhash: BlockHash::all_zeros(),
-               merkle_root: TxMerkleNode::all_zeros(),
-               time: 42,
-               bits: 42,
-               nonce: 42
-       };
-       chain_mon.chain_monitor.block_connected(&Block { header, txdata: vec![] }, 200);
+       chain_mon.chain_monitor.block_connected(&create_dummy_block(BlockHash::all_zeros(), 42, Vec::new()), 200);
 
        // Set the persister's return value to be a InProgress.
        persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
index fb413b22f2498590ed838bd172bc2176952db8e8..30eab9066fd0b4190ef65167d8e82c98c51c9be1 100644 (file)
@@ -4517,7 +4517,7 @@ where
 
                if let Some(tx) = funding_broadcastable {
                        log_info!(self.logger, "Broadcasting funding transaction with txid {}", tx.txid());
-                       self.tx_broadcaster.broadcast_transaction(&tx);
+                       self.tx_broadcaster.broadcast_transactions(&[&tx]);
                }
 
                {
@@ -5047,7 +5047,7 @@ where
                };
                if let Some(broadcast_tx) = tx {
                        log_info!(self.logger, "Broadcasting {}", log_tx!(broadcast_tx));
-                       self.tx_broadcaster.broadcast_transaction(&broadcast_tx);
+                       self.tx_broadcaster.broadcast_transactions(&[&broadcast_tx]);
                }
                if let Some(chan) = chan_option {
                        if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
@@ -5657,7 +5657,7 @@ where
                                                                self.issue_channel_close_events(chan, ClosureReason::CooperativeClosure);
 
                                                                log_info!(self.logger, "Broadcasting {}", log_tx!(tx));
-                                                               self.tx_broadcaster.broadcast_transaction(&tx);
+                                                               self.tx_broadcaster.broadcast_transactions(&[&tx]);
                                                                update_maps_on_chan_removal!(self, chan);
                                                                false
                                                        } else { true }
@@ -9266,7 +9266,7 @@ mod tests {
        }
 }
 
-#[cfg(all(any(test, feature = "_test_utils"), feature = "_bench_unstable"))]
+#[cfg(ldk_bench)]
 pub mod bench {
        use crate::chain::Listen;
        use crate::chain::chainmonitor::{ChainMonitor, Persist};
@@ -9286,7 +9286,7 @@ pub mod bench {
 
        use crate::sync::{Arc, Mutex};
 
-       use test::Bencher;
+       use criterion::Criterion;
 
        type Manager<'a, P> = ChannelManager<
                &'a ChainMonitor<InMemorySigner, &'a test_utils::TestChainSource,
@@ -9307,13 +9307,11 @@ pub mod bench {
                fn chain_monitor(&self) -> Option<&test_utils::TestChainMonitor> { None }
        }
 
-       #[cfg(test)]
-       #[bench]
-       fn bench_sends(bench: &mut Bencher) {
-               bench_two_sends(bench, test_utils::TestPersister::new(), test_utils::TestPersister::new());
+       pub fn bench_sends(bench: &mut Criterion) {
+               bench_two_sends(bench, "bench_sends", test_utils::TestPersister::new(), test_utils::TestPersister::new());
        }
 
-       pub fn bench_two_sends<P: Persist<InMemorySigner>>(bench: &mut Bencher, persister_a: P, persister_b: P) {
+       pub fn bench_two_sends<P: Persist<InMemorySigner>>(bench: &mut Criterion, bench_name: &str, persister_a: P, persister_b: P) {
                // Do a simple benchmark of sending a payment back and forth between two nodes.
                // Note that this is unrealistic as each payment send will require at least two fsync
                // calls per node.
@@ -9383,10 +9381,7 @@ pub mod bench {
 
                assert_eq!(&tx_broadcaster.txn_broadcasted.lock().unwrap()[..], &[tx.clone()]);
 
-               let block = Block {
-                       header: BlockHeader { version: 0x20000000, prev_blockhash: BestBlock::from_network(network).block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 },
-                       txdata: vec![tx],
-               };
+               let block = create_dummy_block(BestBlock::from_network(network).block_hash(), 42, vec![tx]);
                Listen::block_connected(&node_a, &block, 1);
                Listen::block_connected(&node_b, &block, 1);
 
@@ -9469,9 +9464,9 @@ pub mod bench {
                        }
                }
 
-               bench.iter(|| {
+               bench.bench_function(bench_name, |b| b.iter(|| {
                        send_payment!(node_a, node_b);
                        send_payment!(node_b, node_a);
-               });
+               }));
        }
 }
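For reference, a standalone Criterion harness would typically register the `bench_sends` function above along these lines. This is only a sketch (the module path is taken from the code above, but the group name and file layout are assumptions, not necessarily the repository's actual bench target), and it only compiles when the crate is built with the `ldk_bench` cfg:

use criterion::{criterion_group, criterion_main};
use lightning::ln::channelmanager::bench::bench_sends;

// Register `bench_sends` (signature `fn(&mut Criterion)`) and generate `main`;
// this works because the bench target declares `harness = false`.
criterion_group!(benches, bench_sends);
criterion_main!(benches);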
index 7d4b86f5acabf6ae81d5f0df9b13f3090fb11d29..b8087546c7445060230a5ad776a9b3f13c113d37 100644 (file)
@@ -422,8 +422,10 @@ pub struct Features<T: sealed::Context> {
        mark: PhantomData<T>,
 }
 
-impl <T: sealed::Context> Features<T> {
-       pub(crate) fn or(mut self, o: Self) -> Self {
+impl<T: sealed::Context> core::ops::BitOr for Features<T> {
+       type Output = Self;
+
+       fn bitor(mut self, o: Self) -> Self {
                let total_feature_len = cmp::max(self.flags.len(), o.flags.len());
                self.flags.resize(total_feature_len, 0u8);
                for (byte, o_byte) in self.flags.iter_mut().zip(o.flags.iter()) {
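A brief, hedged illustration of the new `BitOr` impl (the setter/getter names used below exist elsewhere in `features.rs`; the snippet itself is not part of this patch): the union of two feature sets is now written with `|` instead of the removed `or` method.

use lightning::ln::features::InitFeatures;

let mut a = InitFeatures::empty();
a.set_data_loss_protect_optional();
let mut b = InitFeatures::empty();
b.set_variable_length_onion_optional();
// `bitor` resizes to the longer flag vector and ORs the bytes pairwise.
let combined = a | b;
assert!(combined.supports_data_loss_protect());
assert!(combined.supports_variable_length_onion());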
@@ -695,6 +697,25 @@ impl<T: sealed::Context> Features<T> {
                self.flags.iter().any(|&byte| (byte & 0b10_10_10_10) != 0)
        }
 
+       /// Returns true if this `Features` object contains required features unknown by `other`.
+       pub fn requires_unknown_bits_from(&self, other: &Features<T>) -> bool {
+               // Bitwise AND-ing with all even bits set except for known features will select required
+               // unknown features.
+               self.flags.iter().enumerate().any(|(i, &byte)| {
+                       const REQUIRED_FEATURES: u8 = 0b01_01_01_01;
+                       const OPTIONAL_FEATURES: u8 = 0b10_10_10_10;
+                       let unknown_features = if i < other.flags.len() {
+                               // Form a mask similar to !T::KNOWN_FEATURE_MASK only for `other`
+                               !(other.flags[i]
+                                       | ((other.flags[i] >> 1) & REQUIRED_FEATURES)
+                                       | ((other.flags[i] << 1) & OPTIONAL_FEATURES))
+                       } else {
+                               0b11_11_11_11
+                       };
+                       (byte & (REQUIRED_FEATURES & unknown_features)) != 0
+               })
+       }
+
        /// Returns true if this `Features` object contains unknown feature flags which are set as
        /// "required".
        pub fn requires_unknown_bits(&self) -> bool {
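A hedged usage sketch of `requires_unknown_bits_from` as added above (the helper name and scenario are illustrative, not from this patch):

use lightning::ln::features::InitFeatures;

// Returns false if the peer requires a feature bit that `ours` neither
// requires nor supports, i.e. a required bit we would not know how to honor.
fn peer_features_acceptable(ours: &InitFeatures, theirs: &InitFeatures) -> bool {
    !theirs.requires_unknown_bits_from(ours)
}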
@@ -743,6 +764,50 @@ impl<T: sealed::Context> Features<T> {
                }
                true
        }
+
+       /// Sets a required custom feature bit. Errors if `bit` is outside the custom range as defined
+       /// by [bLIP 2] or if it is a known `T` feature.
+       ///
+       /// Note: Required bits are even. If an odd bit is given, then the corresponding even bit will
+       /// be set instead (i.e., `bit - 1`).
+       ///
+       /// [bLIP 2]: https://github.com/lightning/blips/blob/master/blip-0002.md#feature-bits
+       pub fn set_required_custom_bit(&mut self, bit: usize) -> Result<(), ()> {
+               self.set_custom_bit(bit - (bit % 2))
+       }
+
+       /// Sets an optional custom feature bit. Errors if `bit` is outside the custom range as defined
+       /// by [bLIP 2] or if it is a known `T` feature.
+       ///
+       /// Note: Optional bits are odd. If an even bit is given, then the corresponding odd bit will be
+       /// set instead (i.e., `bit + 1`).
+       ///
+       /// [bLIP 2]: https://github.com/lightning/blips/blob/master/blip-0002.md#feature-bits
+       pub fn set_optional_custom_bit(&mut self, bit: usize) -> Result<(), ()> {
+               self.set_custom_bit(bit + (1 - (bit % 2)))
+       }
+
+       fn set_custom_bit(&mut self, bit: usize) -> Result<(), ()> {
+               if bit < 256 {
+                       return Err(());
+               }
+
+               let byte_offset = bit / 8;
+               let mask = 1 << (bit - 8 * byte_offset);
+               if byte_offset < T::KNOWN_FEATURE_MASK.len() {
+                       if (T::KNOWN_FEATURE_MASK[byte_offset] & mask) != 0 {
+                               return Err(());
+                       }
+               }
+
+               if self.flags.len() <= byte_offset {
+                       self.flags.resize(byte_offset + 1, 0u8);
+               }
+
+               self.flags[byte_offset] |= mask;
+
+               Ok(())
+       }
 }
 
 impl<T: sealed::UpfrontShutdownScript> Features<T> {
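A minimal sketch of the custom-bit setters added above (the chosen bit values are arbitrary examples within the >= 256 custom range, not values from this patch):

use lightning::ln::features::InitFeatures;

let mut features = InitFeatures::empty();
// An odd `bit` passed to the required setter sets its even companion, 258.
features.set_required_custom_bit(259).expect("259 falls in the custom range");
// Bits below 256, and bits already known to the context, are rejected.
assert!(features.set_required_custom_bit(6).is_err());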
@@ -869,6 +934,43 @@ mod tests {
                assert!(features.supports_unknown_bits());
        }
 
+       #[test]
+       fn requires_unknown_bits_from() {
+               let mut features1 = InitFeatures::empty();
+               let mut features2 = InitFeatures::empty();
+               assert!(!features1.requires_unknown_bits_from(&features2));
+               assert!(!features2.requires_unknown_bits_from(&features1));
+
+               features1.set_data_loss_protect_required();
+               assert!(features1.requires_unknown_bits_from(&features2));
+               assert!(!features2.requires_unknown_bits_from(&features1));
+
+               features2.set_data_loss_protect_optional();
+               assert!(!features1.requires_unknown_bits_from(&features2));
+               assert!(!features2.requires_unknown_bits_from(&features1));
+
+               features2.set_gossip_queries_required();
+               assert!(!features1.requires_unknown_bits_from(&features2));
+               assert!(features2.requires_unknown_bits_from(&features1));
+
+               features1.set_gossip_queries_optional();
+               assert!(!features1.requires_unknown_bits_from(&features2));
+               assert!(!features2.requires_unknown_bits_from(&features1));
+
+               features1.set_variable_length_onion_required();
+               assert!(features1.requires_unknown_bits_from(&features2));
+               assert!(!features2.requires_unknown_bits_from(&features1));
+
+               features2.set_variable_length_onion_optional();
+               assert!(!features1.requires_unknown_bits_from(&features2));
+               assert!(!features2.requires_unknown_bits_from(&features1));
+
+               features1.set_basic_mpp_required();
+               features2.set_wumbo_required();
+               assert!(features1.requires_unknown_bits_from(&features2));
+               assert!(features2.requires_unknown_bits_from(&features1));
+       }
+
        #[test]
        fn convert_to_context_with_relevant_flags() {
                let mut init_features = InitFeatures::empty();
@@ -881,12 +983,12 @@ mod tests {
                init_features.set_payment_secret_required();
                init_features.set_basic_mpp_optional();
                init_features.set_wumbo_optional();
+               init_features.set_anchors_zero_fee_htlc_tx_optional();
                init_features.set_shutdown_any_segwit_optional();
                init_features.set_onion_messages_optional();
                init_features.set_channel_type_optional();
                init_features.set_scid_privacy_optional();
                init_features.set_zero_conf_optional();
-               init_features.set_anchors_zero_fee_htlc_tx_optional();
 
                assert!(init_features.initial_routing_sync());
                assert!(!init_features.supports_upfront_shutdown_script());
@@ -897,7 +999,7 @@ mod tests {
                        // Check that the flags are as expected:
                        // - option_data_loss_protect (req)
                        // - var_onion_optin (req) | static_remote_key (req) | payment_secret(req)
-                       // - basic_mpp | wumbo
+                       // - basic_mpp | wumbo | anchors_zero_fee_htlc_tx
                        // - opt_shutdown_anysegwit
                        // - onion_messages
                        // - option_channel_type | option_scid_alias
@@ -945,6 +1047,36 @@ mod tests {
                assert!(features.supports_payment_secret());
        }
 
+       #[test]
+       fn set_custom_bits() {
+               let mut features = InvoiceFeatures::empty();
+               features.set_variable_length_onion_optional();
+               assert_eq!(features.flags[1], 0b00000010);
+
+               assert!(features.set_optional_custom_bit(255).is_err());
+               assert!(features.set_required_custom_bit(256).is_ok());
+               assert!(features.set_required_custom_bit(258).is_ok());
+               assert_eq!(features.flags[31], 0b00000000);
+               assert_eq!(features.flags[32], 0b00000101);
+
+               let known_bit = <sealed::InvoiceContext as sealed::PaymentSecret>::EVEN_BIT;
+               let byte_offset = <sealed::InvoiceContext as sealed::PaymentSecret>::BYTE_OFFSET;
+               assert_eq!(byte_offset, 1);
+               assert_eq!(features.flags[byte_offset], 0b00000010);
+               assert!(features.set_required_custom_bit(known_bit).is_err());
+               assert_eq!(features.flags[byte_offset], 0b00000010);
+
+               let mut features = InvoiceFeatures::empty();
+               assert!(features.set_optional_custom_bit(256).is_ok());
+               assert!(features.set_optional_custom_bit(259).is_ok());
+               assert_eq!(features.flags[32], 0b00001010);
+
+               let mut features = InvoiceFeatures::empty();
+               assert!(features.set_required_custom_bit(257).is_ok());
+               assert!(features.set_required_custom_bit(258).is_ok());
+               assert_eq!(features.flags[32], 0b00000101);
+       }
+
        #[test]
        fn encodes_features_without_length() {
                let features = OfferFeatures::from_le_bytes(vec![1, 2, 3, 4, 5, 42, 100, 101]);
index a75d0c9228548aa86d4a8944f214948215da0ba9..93841517ba75ecfbc154382c2340d238cda93daf 100644 (file)
@@ -85,16 +85,14 @@ pub fn confirm_transactions_at<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, txn:
        if conf_height > first_connect_height {
                connect_blocks(node, conf_height - first_connect_height);
        }
-       let mut block = Block {
-               header: BlockHeader { version: 0x20000000, prev_blockhash: node.best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: conf_height, bits: 42, nonce: 42 },
-               txdata: Vec::new(),
-       };
+       let mut txdata = Vec::new();
        for _ in 0..*node.network_chan_count.borrow() { // Make sure we don't end up with channels at the same short id by offsetting by chan_count
-               block.txdata.push(Transaction { version: 0, lock_time: PackedLockTime::ZERO, input: Vec::new(), output: Vec::new() });
+               txdata.push(Transaction { version: 0, lock_time: PackedLockTime::ZERO, input: Vec::new(), output: Vec::new() });
        }
        for tx in txn {
-               block.txdata.push((*tx).clone());
+               txdata.push((*tx).clone());
        }
+       let block = create_dummy_block(node.best_block_hash(), conf_height, txdata);
        connect_block(node, &block);
        scid_utils::scid_from_parts(conf_height as u64, block.txdata.len() as u64 - 1, 0).unwrap()
 }
@@ -191,22 +189,31 @@ impl ConnectStyle {
        }
 }
 
+pub fn create_dummy_header(prev_blockhash: BlockHash, time: u32) -> BlockHeader {
+       BlockHeader {
+               version: 0x2000_0000,
+               prev_blockhash,
+               merkle_root: TxMerkleNode::all_zeros(),
+               time,
+               bits: 42,
+               nonce: 42,
+       }
+}
+
+pub fn create_dummy_block(prev_blockhash: BlockHash, time: u32, txdata: Vec<Transaction>) -> Block {
+       Block { header: create_dummy_header(prev_blockhash, time), txdata }
+}
+
 pub fn connect_blocks<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, depth: u32) -> BlockHash {
        let skip_intermediaries = node.connect_style.borrow().skips_blocks();
 
        let height = node.best_block_info().1 + 1;
-       let mut block = Block {
-               header: BlockHeader { version: 0x2000000, prev_blockhash: node.best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: height, bits: 42, nonce: 42 },
-               txdata: vec![],
-       };
+       let mut block = create_dummy_block(node.best_block_hash(), height, Vec::new());
        assert!(depth >= 1);
        for i in 1..depth {
                let prev_blockhash = block.header.block_hash();
                do_connect_block(node, block, skip_intermediaries);
-               block = Block {
-                       header: BlockHeader { version: 0x20000000, prev_blockhash, merkle_root: TxMerkleNode::all_zeros(), time: height + i, bits: 42, nonce: 42 },
-                       txdata: vec![],
-               };
+               block = create_dummy_block(prev_blockhash, height + i, Vec::new());
        }
        let hash = block.header.block_hash();
        do_connect_block(node, block, false);
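A minimal sketch of the new helpers in use, mirroring the call sites updated later in this diff (the `tx` binding is assumed to be some transaction the test wants confirmed):

// Connect a block at nodes[0]'s tip whose timestamp is 42 and whose only
// transaction is `tx`; create_dummy_header fills in placeholder PoW fields.
let block = create_dummy_block(nodes[0].best_block_hash(), 42, vec![tx.clone()]);
connect_block(&nodes[0], &block);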
@@ -1769,7 +1776,7 @@ macro_rules! get_route_and_payment_hash {
 }
 
 #[macro_export]
-#[cfg(any(test, feature = "_bench_unstable", feature = "_test_utils"))]
+#[cfg(any(test, ldk_bench, feature = "_test_utils"))]
 macro_rules! expect_payment_claimable {
        ($node: expr, $expected_payment_hash: expr, $expected_payment_secret: expr, $expected_recv_value: expr) => {
                expect_payment_claimable!($node, $expected_payment_hash, $expected_payment_secret, $expected_recv_value, None, $node.node.get_our_node_id())
@@ -1796,7 +1803,7 @@ macro_rules! expect_payment_claimable {
 }
 
 #[macro_export]
-#[cfg(any(test, feature = "_bench_unstable", feature = "_test_utils"))]
+#[cfg(any(test, ldk_bench, feature = "_test_utils"))]
 macro_rules! expect_payment_claimed {
        ($node: expr, $expected_payment_hash: expr, $expected_recv_value: expr) => {
                let events = $node.node.get_and_clear_pending_events();
@@ -1913,7 +1920,7 @@ macro_rules! expect_payment_forwarded {
        }
 }
 
-#[cfg(any(test, feature = "_bench_unstable", feature = "_test_utils"))]
+#[cfg(any(test, ldk_bench, feature = "_test_utils"))]
 pub fn expect_channel_pending_event<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, expected_counterparty_node_id: &PublicKey) {
        let events = node.node.get_and_clear_pending_events();
        assert_eq!(events.len(), 1);
@@ -1925,7 +1932,7 @@ pub fn expect_channel_pending_event<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>,
        }
 }
 
-#[cfg(any(test, feature = "_bench_unstable", feature = "_test_utils"))]
+#[cfg(any(test, ldk_bench, feature = "_test_utils"))]
 pub fn expect_channel_ready_event<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, expected_counterparty_node_id: &PublicKey) {
        let events = node.node.get_and_clear_pending_events();
        assert_eq!(events.len(), 1);
index da98153a5606e9410798f343287e1770a2c3a09a..98fc4bd95b6d58f100ae09aaa32519e57237598c 100644 (file)
@@ -38,12 +38,11 @@ use crate::util::string::UntrustedString;
 use crate::util::config::UserConfig;
 
 use bitcoin::hash_types::BlockHash;
-use bitcoin::blockdata::block::{Block, BlockHeader};
 use bitcoin::blockdata::script::{Builder, Script};
 use bitcoin::blockdata::opcodes;
 use bitcoin::blockdata::constants::genesis_block;
 use bitcoin::network::constants::Network;
-use bitcoin::{PackedLockTime, Sequence, Transaction, TxIn, TxMerkleNode, TxOut, Witness};
+use bitcoin::{PackedLockTime, Sequence, Transaction, TxIn, TxOut, Witness};
 use bitcoin::OutPoint as BitcoinOutPoint;
 
 use bitcoin::secp256k1::Secp256k1;
@@ -509,10 +508,7 @@ fn do_test_sanity_on_in_flight_opens(steps: u8) {
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
        if steps & 0b1000_0000 != 0{
-               let block = Block {
-                       header: BlockHeader { version: 0x20000000, prev_blockhash: nodes[0].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 },
-                       txdata: vec![],
-               };
+               let block = create_dummy_block(nodes[0].best_block_hash(), 42, Vec::new());
                connect_block(&nodes[0], &block);
                connect_block(&nodes[1], &block);
        }
@@ -2763,8 +2759,7 @@ fn test_htlc_on_chain_success() {
        assert_eq!(node_txn[1].lock_time.0, 0);
 
        // Verify that B's ChannelManager is able to extract preimage from HTLC Success tx and pass it backward
-       let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42};
-       connect_block(&nodes[1], &Block { header, txdata: vec![commitment_tx[0].clone(), node_txn[0].clone(), node_txn[1].clone()]});
+       connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, vec![commitment_tx[0].clone(), node_txn[0].clone(), node_txn[1].clone()]));
        connect_blocks(&nodes[1], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires
        {
                let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
@@ -2900,8 +2895,7 @@ fn test_htlc_on_chain_success() {
        // we already checked the same situation with A.
 
        // Verify that A's ChannelManager is able to extract preimage from preimage tx and generate PaymentSent
-       let mut header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[0].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42};
-       connect_block(&nodes[0], &Block { header, txdata: vec![node_a_commitment_tx[0].clone(), commitment_spend.clone()] });
+       connect_block(&nodes[0], &create_dummy_block(nodes[0].best_block_hash(), 42, vec![node_a_commitment_tx[0].clone(), commitment_spend.clone()]));
        connect_blocks(&nodes[0], TEST_FINAL_CLTV + MIN_CLTV_EXPIRY_DELTA as u32); // Confirm blocks until the HTLC expires
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
@@ -3463,15 +3457,15 @@ fn test_htlc_ignore_latest_remote_commitment() {
        assert_eq!(node_txn.len(), 3);
        assert_eq!(node_txn[0].txid(), node_txn[1].txid());
 
-       let mut header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
-       connect_block(&nodes[1], &Block { header, txdata: vec![node_txn[0].clone(), node_txn[1].clone()]});
+       let block = create_dummy_block(nodes[1].best_block_hash(), 42, vec![node_txn[0].clone(), node_txn[1].clone()]);
+       connect_block(&nodes[1], &block);
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
        check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
 
        // Duplicate the connect_block call since this may happen due to other listeners
        // registering new transactions
-       connect_block(&nodes[1], &Block { header, txdata: vec![node_txn[0].clone(), node_txn[2].clone()]});
+       connect_block(&nodes[1], &block);
 }
 
 #[test]
@@ -4152,10 +4146,7 @@ fn do_test_htlc_timeout(send_partial_mpp: bool) {
                route_payment(&nodes[0], &[&nodes[1]], 100000).1
        };
 
-       let mut block = Block {
-               header: BlockHeader { version: 0x20000000, prev_blockhash: nodes[0].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 },
-               txdata: vec![],
-       };
+       let mut block = create_dummy_block(nodes[0].best_block_hash(), 42, Vec::new());
        connect_block(&nodes[0], &block);
        connect_block(&nodes[1], &block);
        let block_count = TEST_FINAL_CLTV + CHAN_CONFIRM_DEPTH + 2 - CLTV_CLAIM_BUFFER - LATENCY_GRACE_PERIOD_BLOCKS;
@@ -4534,8 +4525,7 @@ fn test_static_spendable_outputs_justice_tx_revoked_htlc_timeout_tx() {
        assert_ne!(revoked_htlc_txn[0].lock_time.0, 0); // HTLC-Timeout
 
        // B will generate justice tx from A's revoked commitment/HTLC tx
-       let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
-       connect_block(&nodes[1], &Block { header, txdata: vec![revoked_local_txn[0].clone(), revoked_htlc_txn[0].clone()] });
+       connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, vec![revoked_local_txn[0].clone(), revoked_htlc_txn[0].clone()]));
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
        check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
@@ -4605,8 +4595,7 @@ fn test_static_spendable_outputs_justice_tx_revoked_htlc_success_tx() {
        assert_eq!(revoked_local_txn[0].output[unspent_local_txn_output].script_pubkey.len(), 2 + 20); // P2WPKH
 
        // A will generate justice tx from B's revoked commitment/HTLC tx
-       let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[0].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
-       connect_block(&nodes[0], &Block { header, txdata: vec![revoked_local_txn[0].clone(), revoked_htlc_txn[0].clone()] });
+       connect_block(&nodes[0], &create_dummy_block(nodes[0].best_block_hash(), 42, vec![revoked_local_txn[0].clone(), revoked_htlc_txn[0].clone()]));
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
        check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
@@ -4699,8 +4688,7 @@ fn test_onchain_to_onchain_claim() {
        assert_eq!(c_txn[0].lock_time.0, 0); // Success tx
 
        // So we broadcast C's commitment tx and HTLC-Success on B's chain, we should successfully be able to extract preimage and update downstream monitor
-       let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42};
-       connect_block(&nodes[1], &Block { header, txdata: vec![commitment_tx[0].clone(), c_txn[0].clone()]});
+       connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, vec![commitment_tx[0].clone(), c_txn[0].clone()]));
        check_added_monitors!(nodes[1], 1);
        let events = nodes[1].node.get_and_clear_pending_events();
        assert_eq!(events.len(), 2);
@@ -5457,10 +5445,7 @@ fn do_htlc_claim_local_commitment_only(use_dust: bool) {
        check_added_monitors!(nodes[1], 1);
 
        let starting_block = nodes[1].best_block_info();
-       let mut block = Block {
-               header: BlockHeader { version: 0x20000000, prev_blockhash: starting_block.0, merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 },
-               txdata: vec![],
-       };
+       let mut block = create_dummy_block(starting_block.0, 42, Vec::new());
        for _ in starting_block.1 + 1..TEST_FINAL_CLTV - CLTV_CLAIM_BUFFER + starting_block.1 + 2 {
                connect_block(&nodes[1], &block);
                block.header.prev_blockhash = block.block_hash();
@@ -5490,11 +5475,11 @@ fn do_htlc_claim_current_remote_commitment_only(use_dust: bool) {
        // to "time out" the HTLC.
 
        let starting_block = nodes[1].best_block_info();
-       let mut header = BlockHeader { version: 0x20000000, prev_blockhash: starting_block.0, merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
+       let mut block = create_dummy_block(starting_block.0, 42, Vec::new());
 
        for _ in starting_block.1 + 1..TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + starting_block.1 + 2 {
-               connect_block(&nodes[0], &Block { header, txdata: Vec::new()});
-               header.prev_blockhash = header.block_hash();
+               connect_block(&nodes[0], &block);
+               block.header.prev_blockhash = block.block_hash();
        }
        test_txn_broadcast(&nodes[0], &chan, None, HTLCType::NONE);
        check_closed_broadcast!(nodes[0], true);
@@ -5536,10 +5521,7 @@ fn do_htlc_claim_previous_remote_commitment_only(use_dust: bool, check_revoke_no
        }
 
        let starting_block = nodes[1].best_block_info();
-       let mut block = Block {
-               header: BlockHeader { version: 0x20000000, prev_blockhash: starting_block.0, merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 },
-               txdata: vec![],
-       };
+       let mut block = create_dummy_block(starting_block.0, 42, Vec::new());
        for _ in starting_block.1 + 1..TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + CHAN_CONFIRM_DEPTH + 2 {
                connect_block(&nodes[0], &block);
                block.header.prev_blockhash = block.block_hash();
@@ -7212,8 +7194,7 @@ fn test_bump_penalty_txn_on_revoked_commitment() {
 
        // Actually revoke tx by claiming a HTLC
        claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage);
-       let header = BlockHeader { version: 0x20000000, prev_blockhash: header_114, merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
-       connect_block(&nodes[1], &Block { header, txdata: vec![revoked_txn[0].clone()] });
+       connect_block(&nodes[1], &create_dummy_block(header_114, 42, vec![revoked_txn[0].clone()]));
        check_added_monitors!(nodes[1], 1);
 
        // One or more justice tx should have been broadcast, check it
@@ -7312,9 +7293,8 @@ fn test_bump_penalty_txn_on_revoked_htlcs() {
        // Revoke local commitment tx
        claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage);
 
-       let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
        // B will generate both revoked HTLC-timeout/HTLC-preimage txn from revoked commitment tx
-       connect_block(&nodes[1], &Block { header, txdata: vec![revoked_local_txn[0].clone()] });
+       connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, vec![revoked_local_txn[0].clone()]));
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
        check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
@@ -7338,10 +7318,10 @@ fn test_bump_penalty_txn_on_revoked_htlcs() {
 
        // Broadcast set of revoked txn on A
        let hash_128 = connect_blocks(&nodes[0], 40);
-       let header_11 = BlockHeader { version: 0x20000000, prev_blockhash: hash_128, merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
-       connect_block(&nodes[0], &Block { header: header_11, txdata: vec![revoked_local_txn[0].clone()] });
-       let header_129 = BlockHeader { version: 0x20000000, prev_blockhash: header_11.block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
-       connect_block(&nodes[0], &Block { header: header_129, txdata: vec![revoked_htlc_txn[0].clone(), revoked_htlc_txn[1].clone()] });
+       let block_11 = create_dummy_block(hash_128, 42, vec![revoked_local_txn[0].clone()]);
+       connect_block(&nodes[0], &block_11);
+       let block_129 = create_dummy_block(block_11.block_hash(), 42, vec![revoked_htlc_txn[0].clone(), revoked_htlc_txn[1].clone()]);
+       connect_block(&nodes[0], &block_129);
        let events = nodes[0].node.get_and_clear_pending_events();
        expect_pending_htlcs_forwardable_from_events!(nodes[0], events[0..1], true);
        match events.last().unwrap() {
@@ -7392,10 +7372,10 @@ fn test_bump_penalty_txn_on_revoked_htlcs() {
        }
 
        // Connect one more block to see if bumped penalty are issued for HTLC txn
-       let header_130 = BlockHeader { version: 0x20000000, prev_blockhash: header_129.block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
-       connect_block(&nodes[0], &Block { header: header_130, txdata: penalty_txn });
-       let header_131 = BlockHeader { version: 0x20000000, prev_blockhash: header_130.block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
-       connect_block(&nodes[0], &Block { header: header_131, txdata: Vec::new() });
+       let block_130 = create_dummy_block(block_129.block_hash(), 42, penalty_txn);
+       connect_block(&nodes[0], &block_130);
+       let block_131 = create_dummy_block(block_130.block_hash(), 42, Vec::new());
+       connect_block(&nodes[0], &block_131);
 
        // Few more blocks to confirm penalty txn
        connect_blocks(&nodes[0], 4);
@@ -7417,8 +7397,7 @@ fn test_bump_penalty_txn_on_revoked_htlcs() {
                txn
        };
        // Broadcast claim txn and confirm blocks to avoid further bumps on this outputs
-       let header_145 = BlockHeader { version: 0x20000000, prev_blockhash: header_144, merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
-       connect_block(&nodes[0], &Block { header: header_145, txdata: node_txn });
+       connect_block(&nodes[0], &create_dummy_block(header_144, 42, node_txn));
        connect_blocks(&nodes[0], 20);
        {
                let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
@@ -7630,8 +7609,7 @@ fn test_bump_txn_sanitize_tracking_maps() {
                node_txn.clear();
                penalty_txn
        };
-       let header_130 = BlockHeader { version: 0x20000000, prev_blockhash: nodes[0].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
-       connect_block(&nodes[0], &Block { header: header_130, txdata: penalty_txn });
+       connect_block(&nodes[0], &create_dummy_block(nodes[0].best_block_hash(), 42, penalty_txn));
        connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
        {
                let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(OutPoint { txid: chan.3.txid(), index: 0 }).unwrap();
@@ -8275,14 +8253,7 @@ fn test_secret_timeout() {
        } else { panic!(); }
        let mut block = {
                let node_1_blocks = nodes[1].blocks.lock().unwrap();
-               Block {
-                       header: BlockHeader {
-                               version: 0x2000000,
-                               prev_blockhash: node_1_blocks.last().unwrap().0.block_hash(),
-                               merkle_root: TxMerkleNode::all_zeros(),
-                               time: node_1_blocks.len() as u32 + 7200, bits: 42, nonce: 42 },
-                       txdata: vec![],
-               }
+               create_dummy_block(node_1_blocks.last().unwrap().0.block_hash(), node_1_blocks.len() as u32 + 7200, Vec::new())
        };
        connect_block(&nodes[1], &block);
        if let Err(APIError::APIMisuseError { err }) = nodes[1].node.create_inbound_payment_for_hash_legacy(payment_hash, Some(100_000), 2) {
@@ -8430,8 +8401,7 @@ fn test_update_err_monitor_lockdown() {
                assert_eq!(watchtower.watch_channel(outpoint, new_monitor), ChannelMonitorUpdateStatus::Completed);
                watchtower
        };
-       let header = BlockHeader { version: 0x20000000, prev_blockhash: BlockHash::all_zeros(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
-       let block = Block { header, txdata: vec![] };
+       let block = create_dummy_block(BlockHash::all_zeros(), 42, Vec::new());
        // Make the tx_broadcaster aware of enough blocks that it doesn't think we're violating
        // transaction lock time requirements here.
        chanmon_cfgs[0].tx_broadcaster.blocks.lock().unwrap().resize(200, (block.clone(), 200));
@@ -8501,8 +8471,7 @@ fn test_concurrent_monitor_claim() {
                assert_eq!(watchtower.watch_channel(outpoint, new_monitor), ChannelMonitorUpdateStatus::Completed);
                watchtower
        };
-       let header = BlockHeader { version: 0x20000000, prev_blockhash: BlockHash::all_zeros(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
-       let block = Block { header, txdata: vec![] };
+       let block = create_dummy_block(BlockHash::all_zeros(), 42, Vec::new());
        // Make Alice aware of enough blocks that it doesn't think we're violating transaction lock time
        // requirements here.
        const HTLC_TIMEOUT_BROADCAST: u32 = CHAN_CONFIRM_DEPTH + 1 + TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS;
@@ -8533,8 +8502,7 @@ fn test_concurrent_monitor_claim() {
                assert_eq!(watchtower.watch_channel(outpoint, new_monitor), ChannelMonitorUpdateStatus::Completed);
                watchtower
        };
-       let header = BlockHeader { version: 0x20000000, prev_blockhash: BlockHash::all_zeros(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
-       watchtower_bob.chain_monitor.block_connected(&Block { header, txdata: vec![] }, HTLC_TIMEOUT_BROADCAST - 1);
+       watchtower_bob.chain_monitor.block_connected(&create_dummy_block(BlockHash::all_zeros(), 42, Vec::new()), HTLC_TIMEOUT_BROADCAST - 1);
 
        // Route another payment to generate another update with still previous HTLC pending
        let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 3000000);
@@ -8560,8 +8528,7 @@ fn test_concurrent_monitor_claim() {
        check_added_monitors!(nodes[0], 1);
 
        //// Provide one more block to watchtower Bob, expect broadcast of commitment and HTLC-Timeout
-       let header = BlockHeader { version: 0x20000000, prev_blockhash: BlockHash::all_zeros(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
-       watchtower_bob.chain_monitor.block_connected(&Block { header, txdata: vec![] }, HTLC_TIMEOUT_BROADCAST);
+       watchtower_bob.chain_monitor.block_connected(&create_dummy_block(BlockHash::all_zeros(), 42, Vec::new()), HTLC_TIMEOUT_BROADCAST);
 
        // Watchtower Bob should have broadcast a commitment/HTLC-timeout
        let bob_state_y;
@@ -8572,12 +8539,11 @@ fn test_concurrent_monitor_claim() {
        };
 
        // We confirm Bob's state Y on Alice, she should broadcast a HTLC-timeout
-       let header = BlockHeader { version: 0x20000000, prev_blockhash: BlockHash::all_zeros(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
        let height = HTLC_TIMEOUT_BROADCAST + 1;
        connect_blocks(&nodes[0], height - nodes[0].best_block_info().1);
        check_closed_broadcast(&nodes[0], 1, true);
        check_closed_event(&nodes[0], 1, ClosureReason::CommitmentTxConfirmed, false);
-       watchtower_alice.chain_monitor.block_connected(&Block { header, txdata: vec![bob_state_y.clone()] }, height);
+       watchtower_alice.chain_monitor.block_connected(&create_dummy_block(BlockHash::all_zeros(), 42, vec![bob_state_y.clone()]), height);
        check_added_monitors(&nodes[0], 1);
        {
                let htlc_txn = alice_broadcaster.txn_broadcast();
@@ -8652,11 +8618,11 @@ fn test_htlc_no_detection() {
        check_spends!(local_txn[0], chan_1.3);
 
        // Timeout HTLC on A's chain and so it can generate a HTLC-Timeout tx
-       let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[0].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
-       connect_block(&nodes[0], &Block { header, txdata: vec![local_txn[0].clone()] });
+       let block = create_dummy_block(nodes[0].best_block_hash(), 42, vec![local_txn[0].clone()]);
+       connect_block(&nodes[0], &block);
        // We deliberately connect the local tx twice as this should provoke a failure calling
        // this test before #653 fix.
-       chain::Listen::block_connected(&nodes[0].chain_monitor.chain_monitor, &Block { header, txdata: vec![local_txn[0].clone()] }, nodes[0].best_block_info().1 + 1);
+       chain::Listen::block_connected(&nodes[0].chain_monitor.chain_monitor, &block, nodes[0].best_block_info().1 + 1);
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
        check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
@@ -8671,8 +8637,7 @@ fn test_htlc_no_detection() {
                node_txn[0].clone()
        };
 
-       let header_201 = BlockHeader { version: 0x20000000, prev_blockhash: nodes[0].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
-       connect_block(&nodes[0], &Block { header: header_201, txdata: vec![htlc_timeout.clone()] });
+       connect_block(&nodes[0], &create_dummy_block(nodes[0].best_block_hash(), 42, vec![htlc_timeout.clone()]));
        connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
        expect_payment_failed!(nodes[0], our_payment_hash, false);
 }
@@ -8732,8 +8697,7 @@ fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain
                        true => alice_txn.clone(),
                        false => get_local_commitment_txn!(nodes[1], chan_ab.2)
                };
-               let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42};
-               connect_block(&nodes[1], &Block { header, txdata: vec![txn_to_broadcast[0].clone()]});
+               connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, vec![txn_to_broadcast[0].clone()]));
                if broadcast_alice {
                        check_closed_broadcast!(nodes[1], true);
                        check_added_monitors!(nodes[1], 1);
@@ -8812,8 +8776,7 @@ fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain
        let mut txn_to_broadcast = alice_txn.clone();
        if !broadcast_alice { txn_to_broadcast = get_local_commitment_txn!(nodes[1], chan_ab.2); }
        if !go_onchain_before_fulfill {
-               let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42};
-               connect_block(&nodes[1], &Block { header, txdata: vec![txn_to_broadcast[0].clone()]});
+               connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, vec![txn_to_broadcast[0].clone()]));
                // If Bob was the one to force-close, he will have already passed these checks earlier.
                if broadcast_alice {
                        check_closed_broadcast!(nodes[1], true);
index 6856bbcd6dfc498b7d97d4f685b9a99e8ee79178..5fa39137cf4577f41f85a57b95aedf067ea43e5b 100644 (file)
@@ -1696,7 +1696,7 @@ fn do_test_restored_packages_retry(check_old_monitor_retries_after_upgrade: bool
        // old `ChannelMonitor` that did not exercise said rebroadcasting logic.
        if check_old_monitor_retries_after_upgrade {
                let serialized_monitor = hex::decode(
-                       "0101fffffffffffffffff9550f22c95100160014d5a9aa98b89acc215fc3d23d6fec0ad59ca3665f00002200204c5f18e5e95b184f34d02ba6de8a2a4e36ae3d4ec87299ad81f3284dc7195c6302d7dde8e10a5a22c9bd0d7ef5494d85683ac050253b917615d4f97af633f0a8e2035f5e9d58b4328566223c107d86cf853e6b9fae1d26ff6d969be0178d1423c4ea0016001467822698d782e8421ebdf96d010de99382b7ec2300160014caf6d80fe2bab80473b021f57588a9c384bf23170000000000000000000000004d49e5da0000000000000000000000000000002a0270b20ad0f2c2bb30a55590fc77778495bc1b38c96476901145dda57491237f0f74c52ab4f11296d62b66a6dba9513b04a3e7fb5a09a30cee22fce7294ab55b7e00000022002034c0cc0ad0dd5fe61dcf7ef58f995e3d34f8dbd24aa2a6fae68fefe102bf025c21391732ce658e1fe167300bb689a81e7db5399b9ee4095e217b0e997e8dd3d17a0000000000000000004a002103adde8029d3ee281a32e9db929b39f503ff9d7e93cd308eb157955344dc6def84022103205087e2dc1f6b9937e887dfa712c5bdfa950b01dbda3ebac4c85efdde48ee6a04020090004752210307a78def56cba9fc4db22a25928181de538ee59ba1a475ae113af7790acd0db32103c21e841cbc0b48197d060c71e116c185fa0ac281b7d0aa5924f535154437ca3b52ae00000000000186a0ffffffffffff0291e7c0a3232fb8650a6b4089568a81062b48a768780e5a74bb4a4a74e33aec2c029d5760248ec86c4a76d9df8308555785a06a65472fb995f5b392d520bbd000650090c1c94b11625690c9d84c5daa67b6ad19fcc7f9f23e194384140b08fcab9e8e810000ffffffffffff0000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000
00000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000002391732ce658e1fe167300bb689a81e7db5399b9ee4095e217b0e997e8dd3d17a00000000000000010000000000009896800000005166687aadf862bd776c8fc18b8e9f8e20089714856ee233b3902a591d0d5f292505000000009c009900202d704fbfe342a9ff6eaca14d80a24aaed0e680bbbdd36157b6f2798c61d906910120f9fe5e552aa0fc45020f0505efde432a4e373e5d393863973a6899f8c26d33d10208000000000098968004494800210355f8d2238a322d16b602bd0ceaad5b01019fb055971eaadcc9b29226a4da6c23020500030241000408000001000000000006020000080800000000009896800a0400000046167c86cc0e598a6b541f7c9bf9ef17222e4a76f636e2d22185aeadd2b02d029c00000000000000000000000000000000000000000000000166687aadf862bd776c8fc18b8e9f8e20089714856ee233b3902a591d0d5f2925fffffffffffe01e3002004f8eda5676356f539169a8e9a1e86c7f125283328d6f4bded1b939b52a6a7e30108000000000000c299022103a1f98e85886df54add6908b4fc1ff515e44aedefe9eb9c02879c89994298fa79042103a650bf03971df0176c7b412247390ef717853e8bd487b204dccc2fe2078bb75206210390bbbcebe9f70ba5dfd98866a79f72f75e0a6ea550ef73b202dd87cd6477350a08210284152d57908488e666e872716a286eb670b3d06cbeebf3f2e4ad350e01ec5e5b0a2102295e2de39eb3dcc2882f8cc266df7882a8b6d2c32aa08799f49b693aad3be28e0c04000000fd0e00fd01fe002045cfd42d0989e55b953f516ac7fd152bd90ec4438a2fc636f97ddd32a0c8fe0d01080000000000009b5e0221035f5e9d58b4328566223c107d86cf853e6b9fae1d26ff6d969be0178d1423c4ea04210230fde9c031f487db95ff55b7c0acbe0c7c26a8d82615e9184416bd350101616706210225afb4e88eac8b47b67adeaf085f5eb5d37d936f56138f0848de3d104edf113208210208e4687a95c172b86b920c3bc5dbd5f023094ec2cb0abdb74f9b624f45740df90a2102d7dde8e10a5a22c9bd0d7ef5494d85683ac050253b917615d4f97af633f0a8e20c04000000fd0efd01193b00010102080000000000989680040400000051062066687aadf862bd776c8fc18b8e9f8e20089714856ee233b3902a591d0d5f2925080400000000417e2650c201383711eed2a7cb8652c3e77ee6a395e81849c5c222217ed68b333c0ca9f1e900662ae68a7359efa7ef9d90613f2a62f7c3ff90f8c25e2cc974c9d39c009900202d704fbfe342a9ff6eaca14d80a24aaed0e680bbbdd36157b6f2798c61d906910120f9fe5e552aa0fc45020f0
505efde432a4e373e5d393863973a6899f8c26d33d10208000000000098968004494800210355f8d2238a322d16b602bd0ceaad5b01019fb055971eaadcc9b29226a4da6c23020500030241000408000001000000000006020000080800000000009896800a0400000046fffffffffffefffffffffffe000000000000000000000000000000000000000000000000ffe099e83ae3761c7f1b781d22613bd1f6977e9ad59fae12b3eba34462ee8a3d000000500000000000000002fd01da002045cfd42d0989e55b953f516ac7fd152bd90ec4438a2fc636f97ddd32a0c8fe0d01fd01840200000000010174c52ab4f11296d62b66a6dba9513b04a3e7fb5a09a30cee22fce7294ab55b7e00000000000f55f9800310270000000000002200208309b406e3b96e76cde414fbb8f5159f5b25b24075656c6382cec797854d53495e9b0000000000002200204c5f18e5e95b184f34d02ba6de8a2a4e36ae3d4ec87299ad81f3284dc7195c6350c300000000000016001425df8ec4a074f80579fed67d4707d5ec8ed7e8d304004730440220671c9badf26bd3a1ebd2d17020c6be20587d7822530daacc52c28839875eaec602204b575a21729ed27311f6d79fdf6fe8702b0a798f7d842e39ede1b56f249a613401473044022016a0da36f70cbf5d889586af88f238982889dc161462c56557125c7acfcb69e9022036ae10c6cc8cbc3b27d9e9ef6babb556086585bc819f252208bd175286699fdd014752210307a78def56cba9fc4db22a25928181de538ee59ba1a475ae113af7790acd0db32103c21e841cbc0b48197d060c71e116c185fa0ac281b7d0aa5924f535154437ca3b52ae50c9222002040000000b03209452ca8c90d4c78928b80ec41398f2a890324d8ad6e6c81408a0cb9b8d977b070406030400020090fd02a1002045cfd42d0989e55b953f516ac7fd152bd90ec4438a2fc636f97ddd32a0c8fe0d01fd01840200000000010174c52ab4f11296d62b66a6dba9513b04a3e7fb5a09a30cee22fce7294ab55b7e00000000000f55f9800310270000000000002200208309b406e3b96e76cde414fbb8f5159f5b25b24075656c6382cec797854d53495e9b0000000000002200204c5f18e5e95b184f34d02ba6de8a2a4e36ae3d4ec87299ad81f3284dc7195c6350c300000000000016001425df8ec4a074f80579fed67d4707d5ec8ed7e8d304004730440220671c9badf26bd3a1ebd2d17020c6be20587d7822530daacc52c28839875eaec602204b575a21729ed27311f6d79fdf6fe8702b0a798f7d842e39ede1b56f249a613401473044022016a0da36f70cbf5d889586af88f238982889dc161462c56557125c7acfcb69e9022036ae10c6cc8cbc3b27d9e9ef6babb556086585bc819f252208bd175286699fdd014752210307a78def56cba9fc4db22a25928181de538ee59ba1a475ae113af7790acd0db32103c21e841cbc0b48197d060c71e116c185fa0ac281b7d0aa5924f535154437ca3b52ae50c9222002040000000b03209452ca8c90d4c78928b80ec41398f2a890324d8ad6e6c81408a0cb9b8d977b0704cd01cb00c901c7002245cfd42d0989e55b953f516ac7fd152bd90ec4438a2fc636f97ddd32a0c8fe0d0001022102d7dde8e10a5a22c9bd0d7ef5494d85683ac050253b917615d4f97af633f0a8e204020090062b5e9b0000000000002200204c5f18e5e95b184f34d02ba6de8a2a4e36ae3d4ec87299ad81f3284dc7195c630821035f5e9d58b4328566223c107d86cf853e6b9fae1d26ff6d969be0178d1423c4ea0a200000000000000000000000004d49e5da0000000000000000000000000000002a0c0800000000000186a0000000000000000274c52ab4f11296d62b66a6dba9513b04a3e7fb5a09a30cee22fce7294ab55b7e0000000000000001000000000022002034c0cc0ad0dd5fe61dcf7ef58f995e3d34f8dbd24aa2a6fae68fefe102bf025c45cfd42d0989e55b953f516ac7fd152bd90ec4438a2fc636f97ddd32a0c8fe0d000000000000000100000000002200208309b406e3b96e76cde414fbb8f5159f5b25b24075656c6382cec797854d5349010100160014d5a9aa98b89acc215fc3d23d6fec0ad59ca3665ffd027100fd01e6fd01e300080000fffffffffffe02080000000000009b5e0408000000000000c3500604000000fd08b0af002102d7dde8e10a5a22c9bd0d7ef5494d85683ac050253b917615d4f97af633f0a8e20221035f5e9d58b4328566223c107d86cf853e6b9fae1d26ff6d969be0178d1423c4ea04210230fde9c031f487db95ff55b7c0acbe0c7c26a8d82615e9184416bd350101616706210225afb4e88eac8b47b67adeaf085f5eb5d37d936f56138f0848de3d104edf113208210208e4687a95c172b86b920c3bc5dbd5f023094ec2cb0abdb74f9b624f45740df90acdcc00a8020000000174c52ab4f11296d62b66
a6dba9513b04a3e7fb5a09a30cee22fce7294ab55b7e00000000000f55f9800310270000000000002200208309b406e3b96e76cde414fbb8f5159f5b25b24075656c6382cec797854d53495e9b0000000000002200204c5f18e5e95b184f34d02ba6de8a2a4e36ae3d4ec87299ad81f3284dc7195c6350c300000000000016001425df8ec4a074f80579fed67d4707d5ec8ed7e8d350c92220022045cfd42d0989e55b953f516ac7fd152bd90ec4438a2fc636f97ddd32a0c8fe0d0c3c3b00010102080000000000989680040400000051062066687aadf862bd776c8fc18b8e9f8e20089714856ee233b3902a591d0d5f29250804000000000240671c9badf26bd3a1ebd2d17020c6be20587d7822530daacc52c28839875eaec64b575a21729ed27311f6d79fdf6fe8702b0a798f7d842e39ede1b56f249a613404010006407e2650c201383711eed2a7cb8652c3e77ee6a395e81849c5c222217ed68b333c0ca9f1e900662ae68a7359efa7ef9d90613f2a62f7c3ff90f8c25e2cc974c9d3010000000000000001010000000000000000090b2a953d93a124c600ecb1a0ccfed420169cdd37f538ad94a3e4e6318c93c14adf59cdfbb40bdd40950c9f8dd547d29d75a173e1376a7850743394c46dea2dfd01cefd01ca00fd017ffd017c00080000ffffffffffff0208000000000000c2990408000000000000c3500604000000fd08b0af002102295e2de39eb3dcc2882f8cc266df7882a8b6d2c32aa08799f49b693aad3be28e022103a1f98e85886df54add6908b4fc1ff515e44aedefe9eb9c02879c89994298fa79042103a650bf03971df0176c7b412247390ef717853e8bd487b204dccc2fe2078bb75206210390bbbcebe9f70ba5dfd98866a79f72f75e0a6ea550ef73b202dd87cd6477350a08210284152d57908488e666e872716a286eb670b3d06cbeebf3f2e4ad350e01ec5e5b0aa2a1007d020000000174c52ab4f11296d62b66a6dba9513b04a3e7fb5a09a30cee22fce7294ab55b7e00000000000f55f9800299c2000000000000220020740e108cfbc93967b6ab242a351ebee7de51814cf78d366adefd78b10281f17e50c300000000000016001425df8ec4a074f80579fed67d4707d5ec8ed7e8d351c92220022004f8eda5676356f539169a8e9a1e86c7f125283328d6f4bded1b939b52a6a7e30c00024045cb2485594bb1ec08e7bb6af4f89c912bd53f006d7876ea956773e04a4aad4a40e2b8d4fc612102f0b54061b3c1239fb78783053e8e6f9d92b1b99f81ae9ec2040100060000fd019600b0af002103c21e841cbc0b48197d060c71e116c185fa0ac281b7d0aa5924f535154437ca3b02210270b20ad0f2c2bb30a55590fc77778495bc1b38c96476901145dda57491237f0f042103b4e59df102747edc3a3e2283b42b88a8c8218ffd0dcfb52f2524b371d64cadaa062103d902b7b8b3434076d2b210e912c76645048b71e28995aad227a465a65ccd817608210301e9a52f923c157941de4a7692e601f758660969dcf5abdb67817efe84cce2ef0202009004010106b7b600b0af00210307a78def56cba9fc4db22a25928181de538ee59ba1a475ae113af7790acd0db30221034d0f817cb19b4a3bd144b615459bd06cbab3b4bdc96d73e18549a992cee80e8104210380542b59a9679890cba529fe155a9508ef57dac7416d035b23666e3fb98c3814062103adde8029d3ee281a32e9db929b39f503ff9d7e93cd308eb157955344dc6def84082103205087e2dc1f6b9937e887dfa712c5bdfa950b01dbda3ebac4c85efdde48ee6a02020090082274c52ab4f11296d62b66a6dba9513b04a3e7fb5a09a30cee22fce7294ab55b7e000000000287010108d30df34e3a1e00ecdd03a2c843db062479a81752c4dfd0cc4baef0f81e7bc7ef8820990daf8d8e8d30a3b4b08af12c9f5cd71e45c7238103e0c80ca13850862e4fd2c56b69b7195312518de1bfe9aed63c80bb7760d70b2a870d542d815895fd12423d11e2adb0cdf55d776dac8f487c9b3b7ea12f1b150eb15889cf41333ade465692bf1cdc360b9c2a19bf8c1ca4fed7639d8bc953d36c10d8c6c9a8c0a57608788979bcf145e61b308006896e21d03e92084f93bd78740c20639134a7a8fd019afd019600b0af002103c21e841cbc0b48197d060c71e116c185fa0ac281b7d0aa5924f535154437ca3b02210270b20ad0f2c2bb30a55590fc77778495bc1b38c96476901145dda57491237f0f042103b4e59df102747edc3a3e2283b42b88a8c8218ffd0dcfb52f2524b371d64cadaa062103d902b7b8b3434076d2b210e912c76645048b71e28995aad227a465a65ccd817608210301e9a52f923c157941de4a7692e601f758660969dcf5abdb67817efe84cce2ef0202009004010106b7b600b0af00210307a78def56cba9fc4db22a25928181de538ee59ba1a475ae113af7790acd0db30221034d0f817cb
19b4a3bd144b615459bd06cbab3b4bdc96d73e18549a992cee80e8104210380542b59a9679890cba529fe155a9508ef57dac7416d035b23666e3fb98c3814062103adde8029d3ee281a32e9db929b39f503ff9d7e93cd308eb157955344dc6def84082103205087e2dc1f6b9937e887dfa712c5bdfa950b01dbda3ebac4c85efdde48ee6a02020090082274c52ab4f11296d62b66a6dba9513b04a3e7fb5a09a30cee22fce7294ab55b7e000000000000000186a00000000000000000000000004d49e5da0000000000000000000000000000002a000000000000000001b77b61346a2a408afdb01743a2230cb36e55771a0790f67a0910e207fd223fc8000000000000000145cfd42d0989e55b953f516ac7fd152bd90ec4438a2fc636f97ddd32a0c8fe0d00000000041000080000000000989680020400000051160004000000510208000000000000000004040000000b000000000000000145cfd42d0989e55b953f516ac7fd152bd90ec4438a2fc636f97ddd32a0c8fe0d00000000b77b61346a2a408afdb01743a2230cb36e55771a0790f67a0910e207fd223fc80000005000000000000000000000000000000000000101300300050007010109210355f8d2238a322d16b602bd0ceaad5b01019fb055971eaadcc9b29226a4da6c230d000f020000",
+                       "0101fffffffffffffffff9550f22c95100160014d5a9aa98b89acc215fc3d23d6fec0ad59ca3665f00002200204c5f18e5e95b184f34d02ba6de8a2a4e36ae3d4ec87299ad81f3284dc7195c6302d7dde8e10a5a22c9bd0d7ef5494d85683ac050253b917615d4f97af633f0a8e2035f5e9d58b4328566223c107d86cf853e6b9fae1d26ff6d969be0178d1423c4ea0016001467822698d782e8421ebdf96d010de99382b7ec2300160014caf6d80fe2bab80473b021f57588a9c384bf23170000000000000000000000004d49e5da0000000000000000000000000000002a0270b20ad0f2c2bb30a55590fc77778495bc1b38c96476901145dda57491237f0f74c52ab4f11296d62b66a6dba9513b04a3e7fb5a09a30cee22fce7294ab55b7e00000022002034c0cc0ad0dd5fe61dcf7ef58f995e3d34f8dbd24aa2a6fae68fefe102bf025c21391732ce658e1fe167300bb689a81e7db5399b9ee4095e217b0e997e8dd3d17a0000000000000000004a002103adde8029d3ee281a32e9db929b39f503ff9d7e93cd308eb157955344dc6def84022103205087e2dc1f6b9937e887dfa712c5bdfa950b01dbda3ebac4c85efdde48ee6a04020090004752210307a78def56cba9fc4db22a25928181de538ee59ba1a475ae113af7790acd0db32103c21e841cbc0b48197d060c71e116c185fa0ac281b7d0aa5924f535154437ca3b52ae00000000000186a0ffffffffffff0291e7c0a3232fb8650a6b4089568a81062b48a768780e5a74bb4a4a74e33aec2c029d5760248ec86c4a76d9df8308555785a06a65472fb995f5b392d520bbd000650090c1c94b11625690c9d84c5daa67b6ad19fcc7f9f23e194384140b08fcab9e8e810000ffffffffffff0000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000
00000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000002167c86cc0e598a6b541f7c9bf9ef17222e4a76f636e2d22185aeadd2b02d029c0000000000000000391732ce658e1fe167300bb689a81e7db5399b9ee4095e217b0e997e8dd3d17a00000000000000010000000000009896800000005166687aadf862bd776c8fc18b8e9f8e20089714856ee233b3902a591d0d5f29250500000000a0009d00202d704fbfe342a9ff6eaca14d80a24aaed0e680bbbdd36157b6f2798c61d906910120f9fe5e552aa0fc45020f0505efde432a4e373e5d393863973a6899f8c26d33d102080000000000989680044d4c00210355f8d2238a322d16b602bd0ceaad5b01019fb055971eaadcc9b29226a4da6c2302090007000000000241000408000001000000000006020000080800000000009896800a04000000460000000000000000000000000000000166687aadf862bd776c8fc18b8e9f8e20089714856ee233b3902a591d0d5f2925fffffffffffe01e3002004f8eda5676356f539169a8e9a1e86c7f125283328d6f4bded1b939b52a6a7e30108000000000000c299022103a1f98e85886df54add6908b4fc1ff515e44aedefe9eb9c02879c89994298fa79042103a650bf03971df0176c7b412247390ef717853e8bd487b204dccc2fe2078bb75206210390bbbcebe9f70ba5dfd98866a79f72f75e0a6ea550ef73b202dd87cd6477350a08210284152d57908488e666e872716a286eb670b3d06cbeebf3f2e4ad350e01ec5e5b0a2102295e2de39eb3dcc2882f8cc266df7882a8b6d2c32aa08799f49b693aad3be28e0c04000000fd0e00fd0202002045cfd42d0989e55b953f516ac7fd152bd90ec4438a2fc636f97ddd32a0c8fe0d01080000000000009b5e0221035f5e9d58b4328566223c107d86cf853e6b9fae1d26ff6d969be0178d1423c4ea04210230fde9c031f487db95ff55b7c0acbe0c7c26a8d82615e9184416bd350101616706210225afb4e88eac8b47b67adeaf085f5eb5d37d936f56138f0848de3d104edf113208210208e4687a95c172b86b920c3bc5dbd5f023094ec2cb0abdb74f9b624f45740df90a2102d7dde8e10a5a22c9bd0d7ef5494d85683ac050253b917615d4f97af633f0a8e20c04000000fd0efd011d3b00010102080000000000989680040400000051062066687aadf862bd776c8fc18b8e9f8e20089714856ee233b3902a591d0d5f2925080400000000417e2650c201383711eed2a7cb8652c3e77ee6a395e81849c5c222217ed68b333c0ca9f1e900662ae68a7359efa7ef9d90613f2a62f7c3ff90f8c25e2cc974c9d3a0009d00202d704fbfe342a9ff6eaca14d80a24aaed0e680bbbdd36157b6f2798c61d906910120f9fe5e552aa0f
c45020f0505efde432a4e373e5d393863973a6899f8c26d33d102080000000000989680044d4c00210355f8d2238a322d16b602bd0ceaad5b01019fb055971eaadcc9b29226a4da6c2302090007000000000241000408000001000000000006020000080800000000009896800a0400000046fffffffffffefffffffffffe000000000000000000000000000000000000000000000000f1600ef6ea657b8d411d553516ae35cedfe86b0cd48d1f91b32772facbae757d0000000b0000000000000002fd01da002045cfd42d0989e55b953f516ac7fd152bd90ec4438a2fc636f97ddd32a0c8fe0d01fd01840200000000010174c52ab4f11296d62b66a6dba9513b04a3e7fb5a09a30cee22fce7294ab55b7e00000000000f55f9800310270000000000002200208309b406e3b96e76cde414fbb8f5159f5b25b24075656c6382cec797854d53495e9b0000000000002200204c5f18e5e95b184f34d02ba6de8a2a4e36ae3d4ec87299ad81f3284dc7195c6350c300000000000016001425df8ec4a074f80579fed67d4707d5ec8ed7e8d304004730440220671c9badf26bd3a1ebd2d17020c6be20587d7822530daacc52c28839875eaec602204b575a21729ed27311f6d79fdf6fe8702b0a798f7d842e39ede1b56f249a613401473044022016a0da36f70cbf5d889586af88f238982889dc161462c56557125c7acfcb69e9022036ae10c6cc8cbc3b27d9e9ef6babb556086585bc819f252208bd175286699fdd014752210307a78def56cba9fc4db22a25928181de538ee59ba1a475ae113af7790acd0db32103c21e841cbc0b48197d060c71e116c185fa0ac281b7d0aa5924f535154437ca3b52ae50c9222002040000000b0320f1600ef6ea657b8d411d553516ae35cedfe86b0cd48d1f91b32772facbae757d0406030400020090fd02a1002045cfd42d0989e55b953f516ac7fd152bd90ec4438a2fc636f97ddd32a0c8fe0d01fd01840200000000010174c52ab4f11296d62b66a6dba9513b04a3e7fb5a09a30cee22fce7294ab55b7e00000000000f55f9800310270000000000002200208309b406e3b96e76cde414fbb8f5159f5b25b24075656c6382cec797854d53495e9b0000000000002200204c5f18e5e95b184f34d02ba6de8a2a4e36ae3d4ec87299ad81f3284dc7195c6350c300000000000016001425df8ec4a074f80579fed67d4707d5ec8ed7e8d304004730440220671c9badf26bd3a1ebd2d17020c6be20587d7822530daacc52c28839875eaec602204b575a21729ed27311f6d79fdf6fe8702b0a798f7d842e39ede1b56f249a613401473044022016a0da36f70cbf5d889586af88f238982889dc161462c56557125c7acfcb69e9022036ae10c6cc8cbc3b27d9e9ef6babb556086585bc819f252208bd175286699fdd014752210307a78def56cba9fc4db22a25928181de538ee59ba1a475ae113af7790acd0db32103c21e841cbc0b48197d060c71e116c185fa0ac281b7d0aa5924f535154437ca3b52ae50c9222002040000000b0320f1600ef6ea657b8d411d553516ae35cedfe86b0cd48d1f91b32772facbae757d04cd01cb00c901c7002245cfd42d0989e55b953f516ac7fd152bd90ec4438a2fc636f97ddd32a0c8fe0d0001022102d7dde8e10a5a22c9bd0d7ef5494d85683ac050253b917615d4f97af633f0a8e204020090062b5e9b0000000000002200204c5f18e5e95b184f34d02ba6de8a2a4e36ae3d4ec87299ad81f3284dc7195c630821035f5e9d58b4328566223c107d86cf853e6b9fae1d26ff6d969be0178d1423c4ea0a200000000000000000000000004d49e5da0000000000000000000000000000002a0c0800000000000186a0000000000000000274c52ab4f11296d62b66a6dba9513b04a3e7fb5a09a30cee22fce7294ab55b7e0000000000000001000000000022002034c0cc0ad0dd5fe61dcf7ef58f995e3d34f8dbd24aa2a6fae68fefe102bf025c45cfd42d0989e55b953f516ac7fd152bd90ec4438a2fc636f97ddd32a0c8fe0d000000000000000100000000002200208309b406e3b96e76cde414fbb8f5159f5b25b24075656c6382cec797854d5349010100160014d5a9aa98b89acc215fc3d23d6fec0ad59ca3665ffd027100fd01e6fd01e300080000fffffffffffe02080000000000009b5e0408000000000000c3500604000000fd08b0af002102d7dde8e10a5a22c9bd0d7ef5494d85683ac050253b917615d4f97af633f0a8e20221035f5e9d58b4328566223c107d86cf853e6b9fae1d26ff6d969be0178d1423c4ea04210230fde9c031f487db95ff55b7c0acbe0c7c26a8d82615e9184416bd350101616706210225afb4e88eac8b47b67adeaf085f5eb5d37d936f56138f0848de3d104edf113208210208e4687a95c172b86b920c3bc5dbd5f023094ec2cb0abdb74f9b624f45740df90acdcc00a8020000000174c5
2ab4f11296d62b66a6dba9513b04a3e7fb5a09a30cee22fce7294ab55b7e00000000000f55f9800310270000000000002200208309b406e3b96e76cde414fbb8f5159f5b25b24075656c6382cec797854d53495e9b0000000000002200204c5f18e5e95b184f34d02ba6de8a2a4e36ae3d4ec87299ad81f3284dc7195c6350c300000000000016001425df8ec4a074f80579fed67d4707d5ec8ed7e8d350c92220022045cfd42d0989e55b953f516ac7fd152bd90ec4438a2fc636f97ddd32a0c8fe0d0c3c3b00010102080000000000989680040400000051062066687aadf862bd776c8fc18b8e9f8e20089714856ee233b3902a591d0d5f29250804000000000240671c9badf26bd3a1ebd2d17020c6be20587d7822530daacc52c28839875eaec64b575a21729ed27311f6d79fdf6fe8702b0a798f7d842e39ede1b56f249a613404010006407e2650c201383711eed2a7cb8652c3e77ee6a395e81849c5c222217ed68b333c0ca9f1e900662ae68a7359efa7ef9d90613f2a62f7c3ff90f8c25e2cc974c9d3010000000000000001010000000000000000090b2a953d93a124c600ecb1a0ccfed420169cdd37f538ad94a3e4e6318c93c14adf59cdfbb40bdd40950c9f8dd547d29d75a173e1376a7850743394c46dea2dfd01cefd01ca00fd017ffd017c00080000ffffffffffff0208000000000000c2990408000000000000c3500604000000fd08b0af002102295e2de39eb3dcc2882f8cc266df7882a8b6d2c32aa08799f49b693aad3be28e022103a1f98e85886df54add6908b4fc1ff515e44aedefe9eb9c02879c89994298fa79042103a650bf03971df0176c7b412247390ef717853e8bd487b204dccc2fe2078bb75206210390bbbcebe9f70ba5dfd98866a79f72f75e0a6ea550ef73b202dd87cd6477350a08210284152d57908488e666e872716a286eb670b3d06cbeebf3f2e4ad350e01ec5e5b0aa2a1007d020000000174c52ab4f11296d62b66a6dba9513b04a3e7fb5a09a30cee22fce7294ab55b7e00000000000f55f9800299c2000000000000220020740e108cfbc93967b6ab242a351ebee7de51814cf78d366adefd78b10281f17e50c300000000000016001425df8ec4a074f80579fed67d4707d5ec8ed7e8d351c92220022004f8eda5676356f539169a8e9a1e86c7f125283328d6f4bded1b939b52a6a7e30c00024045cb2485594bb1ec08e7bb6af4f89c912bd53f006d7876ea956773e04a4aad4a40e2b8d4fc612102f0b54061b3c1239fb78783053e8e6f9d92b1b99f81ae9ec2040100060000fd019600b0af002103c21e841cbc0b48197d060c71e116c185fa0ac281b7d0aa5924f535154437ca3b02210270b20ad0f2c2bb30a55590fc77778495bc1b38c96476901145dda57491237f0f042103b4e59df102747edc3a3e2283b42b88a8c8218ffd0dcfb52f2524b371d64cadaa062103d902b7b8b3434076d2b210e912c76645048b71e28995aad227a465a65ccd817608210301e9a52f923c157941de4a7692e601f758660969dcf5abdb67817efe84cce2ef0202009004010106b7b600b0af00210307a78def56cba9fc4db22a25928181de538ee59ba1a475ae113af7790acd0db30221034d0f817cb19b4a3bd144b615459bd06cbab3b4bdc96d73e18549a992cee80e8104210380542b59a9679890cba529fe155a9508ef57dac7416d035b23666e3fb98c3814062103adde8029d3ee281a32e9db929b39f503ff9d7e93cd308eb157955344dc6def84082103205087e2dc1f6b9937e887dfa712c5bdfa950b01dbda3ebac4c85efdde48ee6a02020090082274c52ab4f11296d62b66a6dba9513b04a3e7fb5a09a30cee22fce7294ab55b7e000000000287010108d30df34e3a1e00ecdd03a2c843db062479a81752c4dfd0cc4baef0f81e7bc7ef8820990daf8d8e8d30a3b4b08af12c9f5cd71e45c7238103e0c80ca13850862e4fd2c56b69b7195312518de1bfe9aed63c80bb7760d70b2a870d542d815895fd12423d11e2adb0cdf55d776dac8f487c9b3b7ea12f1b150eb15889cf41333ade465692bf1cdc360b9c2a19bf8c1ca4fed7639d8bc953d36c10d8c6c9a8c0a57608788979bcf145e61b308006896e21d03e92084f93bd78740c20639134a7a8fd019afd019600b0af002103c21e841cbc0b48197d060c71e116c185fa0ac281b7d0aa5924f535154437ca3b02210270b20ad0f2c2bb30a55590fc77778495bc1b38c96476901145dda57491237f0f042103b4e59df102747edc3a3e2283b42b88a8c8218ffd0dcfb52f2524b371d64cadaa062103d902b7b8b3434076d2b210e912c76645048b71e28995aad227a465a65ccd817608210301e9a52f923c157941de4a7692e601f758660969dcf5abdb67817efe84cce2ef0202009004010106b7b600b0af00210307a78def56cba9fc4db22a25928181de538ee59ba1a475ae113af7790acd0db
30221034d0f817cb19b4a3bd144b615459bd06cbab3b4bdc96d73e18549a992cee80e8104210380542b59a9679890cba529fe155a9508ef57dac7416d035b23666e3fb98c3814062103adde8029d3ee281a32e9db929b39f503ff9d7e93cd308eb157955344dc6def84082103205087e2dc1f6b9937e887dfa712c5bdfa950b01dbda3ebac4c85efdde48ee6a02020090082274c52ab4f11296d62b66a6dba9513b04a3e7fb5a09a30cee22fce7294ab55b7e000000000000000186a00000000000000000000000004d49e5da0000000000000000000000000000002a00000000000000000000000000000000000000000000000001000000510000000000000001000000000000000145cfd42d0989e55b953f516ac7fd152bd90ec4438a2fc636f97ddd32a0c8fe0d00000000041000080000000000989680020400000051160004000000510208000000000000000004040000000b0000000000000000000101300300050007010109210355f8d2238a322d16b602bd0ceaad5b01019fb055971eaadcc9b29226a4da6c230d000f020000",
                ).unwrap();
                reload_node!(nodes[0], &nodes[0].node.encode(), &[&serialized_monitor], persister, new_chain_monitor, node_deserialized);
        }
@@ -2185,21 +2185,20 @@ fn test_anchors_aggregated_revoked_htlc_tx() {
        // revoked outputs.
        {
                let txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
-               assert_eq!(txn.len(), 2);
+               assert_eq!(txn.len(), 4);
 
-               let (revoked_claim_a, revoked_claim_b) = if txn[0].input[0].previous_output.txid == revoked_commitment_a.txid() {
-                       (&txn[0], &txn[1])
+               let (revoked_htlc_claim_a, revoked_htlc_claim_b) = if txn[0].input[0].previous_output.txid == revoked_commitment_a.txid() {
+                       (if txn[0].input.len() == 2 { &txn[0] } else { &txn[1] }, if txn[2].input.len() == 2 { &txn[2] } else { &txn[3] })
                } else {
-                       (&txn[1], &txn[0])
+                       (if txn[2].input.len() == 2 { &txn[2] } else { &txn[3] }, if txn[0].input.len() == 2 { &txn[0] } else { &txn[1] })
                };
 
-               // TODO: to_self claim must be separate from HTLC claims
-               assert_eq!(revoked_claim_a.input.len(), 3); // Spends both HTLC outputs and to_self output
-               assert_eq!(revoked_claim_a.output.len(), 1);
-               check_spends!(revoked_claim_a, revoked_commitment_a);
-               assert_eq!(revoked_claim_b.input.len(), 3); // Spends both HTLC outputs and to_self output
-               assert_eq!(revoked_claim_b.output.len(), 1);
-               check_spends!(revoked_claim_b, revoked_commitment_b);
+               assert_eq!(revoked_htlc_claim_a.input.len(), 2); // Spends both HTLC outputs
+               assert_eq!(revoked_htlc_claim_a.output.len(), 1);
+               check_spends!(revoked_htlc_claim_a, revoked_commitment_a);
+               assert_eq!(revoked_htlc_claim_b.input.len(), 2); // Spends both HTLC outputs
+               assert_eq!(revoked_htlc_claim_b.output.len(), 1);
+               check_spends!(revoked_htlc_claim_b, revoked_commitment_b);
        }
 
        // Since Bob was able to confirm his revoked commitment, he'll now try to claim the HTLCs
@@ -2296,21 +2295,7 @@ fn test_anchors_aggregated_revoked_htlc_tx() {
        // the second level instead.
        let revoked_claims = {
                let txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
-               assert_eq!(txn.len(), 4);
-
-               let revoked_to_self_claim_a = txn.iter().find(|tx|
-                       tx.input.len() == 1 &&
-                       tx.output.len() == 1 &&
-                       tx.input[0].previous_output.txid == revoked_commitment_a.txid()
-               ).unwrap();
-               check_spends!(revoked_to_self_claim_a, revoked_commitment_a);
-
-               let revoked_to_self_claim_b = txn.iter().find(|tx|
-                       tx.input.len() == 1 &&
-                       tx.output.len() == 1 &&
-                       tx.input[0].previous_output.txid == revoked_commitment_b.txid()
-               ).unwrap();
-               check_spends!(revoked_to_self_claim_b, revoked_commitment_b);
+               assert_eq!(txn.len(), 2);
 
                let revoked_htlc_claims = txn.iter().filter(|tx|
                        tx.input.len() == 2 &&
@@ -2343,7 +2328,7 @@ fn test_anchors_aggregated_revoked_htlc_tx() {
 
        assert!(nodes[1].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty());
        let spendable_output_events = nodes[0].chain_monitor.chain_monitor.get_and_clear_pending_events();
-       assert_eq!(spendable_output_events.len(), 4);
+       assert_eq!(spendable_output_events.len(), 2);
        for (idx, event) in spendable_output_events.iter().enumerate() {
                if let Event::SpendableOutputs { outputs } = event {
                        assert_eq!(outputs.len(), 1);
@@ -2358,7 +2343,8 @@ fn test_anchors_aggregated_revoked_htlc_tx() {
 
        assert!(nodes[0].node.list_channels().is_empty());
        assert!(nodes[1].node.list_channels().is_empty());
-       assert!(nodes[0].chain_monitor.chain_monitor.get_claimable_balances(&[]).is_empty());
+       // On the Alice side, the individual to_self claims are still pending confirmation.
+       assert_eq!(nodes[0].chain_monitor.chain_monitor.get_claimable_balances(&[]).len(), 2);
        // TODO: From Bob's PoV, he still thinks he can claim the outputs from his revoked commitment.
        // This needs to be fixed before we enable pruning `ChannelMonitor`s once they don't have any
        // balances to claim.
index 39cb9454db89a541ac5f039e8aa11d9d09ab8854..76ed56635ac6a57bd22cb82f5bba93e4c210b5c6 100644 (file)
@@ -1738,7 +1738,7 @@ impl Readable for Init {
                        (3, remote_network_address, option)
                });
                Ok(Init {
-                       features: features.or(global_features),
+                       features: features | global_features,
                        remote_network_address,
                })
        }
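The hunk above merges the per-message `features` TLV with the legacy `global_features` field using the `|` operator instead of `Features::or`. A minimal sketch of that behaviour, assuming only that `InitFeatures` implements `BitOr` as the deserialization code above uses it (a bit-wise union of the two flag sets):

    use lightning::ln::features::InitFeatures;

    fn merge_init_features(features: InitFeatures, global_features: InitFeatures) -> InitFeatures {
        // The result contains every feature bit set in either operand, mirroring how the
        // deserialized Init message now combines its two feature fields.
        features | global_features
    }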
index f107f3b558395fe7ea9da8f8a8194f8f52a9f634..a13a618d83c4c2b0e8988f6103a1a26c2fcb2d63 100644 (file)
@@ -312,7 +312,7 @@ impl<T: Time> Display for PaymentAttemptsUsingTime<T> {
 /// [`ChannelManager::send_payment`]: crate::ln::channelmanager::ChannelManager::send_payment
 /// [`Event::PaymentPathFailed`]: crate::events::Event::PaymentPathFailed
 /// [`Event::PaymentFailed`]: crate::events::Event::PaymentFailed
-#[derive(Clone, Debug)]
+#[derive(Clone, Debug, PartialEq, Eq)]
 pub enum RetryableSendFailure {
        /// The provided [`PaymentParameters::expiry_time`] indicated that the payment has expired. Note
        /// that this error is *not* caused by [`Retry::Timeout`].
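Deriving `PartialEq`/`Eq` here lets callers and tests compare send failures directly instead of pattern matching. A small illustrative sketch, assuming the enum is re-exported as `lightning::ln::channelmanager::RetryableSendFailure` and that the variant documented above is named `PaymentExpired` (neither name is shown in this hunk):

    use lightning::ln::channelmanager::RetryableSendFailure;

    fn is_expiry_failure(err: &RetryableSendFailure) -> bool {
        // This equality comparison only compiles once PartialEq/Eq are derived.
        *err == RetryableSendFailure::PaymentExpired
    }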
index 5e95d019d998129d4cc9a4ca7793ae823deb05f7..9e044d1c92d686479d6f76187001ac75c55d92aa 100644 (file)
@@ -30,8 +30,6 @@ use crate::util::errors::APIError;
 use crate::util::ser::Writeable;
 use crate::util::string::UntrustedString;
 
-use bitcoin::{Block, BlockHeader, TxMerkleNode};
-use bitcoin::hashes::Hash;
 use bitcoin::network::constants::Network;
 
 use crate::prelude::*;
@@ -693,8 +691,7 @@ fn do_test_dup_htlc_onchain_fails_on_reload(persist_manager_post_event: bool, co
        check_added_monitors!(nodes[1], 1);
        expect_payment_claimed!(nodes[1], payment_hash, 10_000_000);
 
-       let mut header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
-       connect_block(&nodes[1], &Block { header, txdata: vec![node_txn[1].clone()]});
+       connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, vec![node_txn[1].clone()]));
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
        check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
@@ -702,15 +699,13 @@ fn do_test_dup_htlc_onchain_fails_on_reload(persist_manager_post_event: bool, co
        assert_eq!(claim_txn.len(), 1);
        check_spends!(claim_txn[0], node_txn[1]);
 
-       header.prev_blockhash = nodes[0].best_block_hash();
-       connect_block(&nodes[0], &Block { header, txdata: vec![node_txn[1].clone()]});
+       connect_block(&nodes[0], &create_dummy_block(nodes[0].best_block_hash(), 42, vec![node_txn[1].clone()]));
 
        if confirm_commitment_tx {
                connect_blocks(&nodes[0], BREAKDOWN_TIMEOUT as u32 - 1);
        }
 
-       header.prev_blockhash = nodes[0].best_block_hash();
-       let claim_block = Block { header, txdata: if payment_timeout { timeout_txn } else { vec![claim_txn[0].clone()] } };
+       let claim_block = create_dummy_block(nodes[0].best_block_hash(), 42, if payment_timeout { timeout_txn } else { vec![claim_txn[0].clone()] });
 
        if payment_timeout {
                assert!(confirm_commitment_tx); // Otherwise we're spending below our CSV!
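The removed lines above show the boilerplate that `create_dummy_block` hides. A rough sketch of such a helper, built only from the `bitcoin` types the removed code already imported (this is not the actual helper added to `functional_test_utils.rs`):

    use bitcoin::{Block, BlockHash, BlockHeader, Transaction, TxMerkleNode};
    use bitcoin::hashes::Hash;

    fn dummy_block(prev_blockhash: BlockHash, time: u32, txdata: Vec<Transaction>) -> Block {
        // Only `prev_blockhash`, `time` and the transaction list matter to the tests;
        // the proof-of-work fields are filled with placeholder values.
        let header = BlockHeader {
            version: 0x20000000,
            prev_blockhash,
            merkle_root: TxMerkleNode::all_zeros(),
            time,
            bits: 42,
            nonce: 42,
        };
        Block { header, txdata }
    }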
@@ -1522,10 +1517,7 @@ fn do_test_intercepted_payment(test: InterceptTest) {
                        _ => panic!("Unexpected event")
                }
        } else if test == InterceptTest::Timeout {
-               let mut block = Block {
-                       header: BlockHeader { version: 0x20000000, prev_blockhash: nodes[0].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 },
-                       txdata: vec![],
-               };
+               let mut block = create_dummy_block(nodes[0].best_block_hash(), 42, Vec::new());
                connect_block(&nodes[0], &block);
                connect_block(&nodes[1], &block);
                for _ in 0..TEST_FINAL_CLTV {
index 0659412f774190ba7adee5ab599d3c4e0174cbb0..3cb6a55f2e80ae0a8a1f717f6e03afe66a9faa64 100644 (file)
@@ -36,7 +36,7 @@ use crate::prelude::*;
 use crate::io;
 use alloc::collections::LinkedList;
 use crate::sync::{Arc, Mutex, MutexGuard, FairRwLock};
-use core::sync::atomic::{AtomicBool, AtomicU32, Ordering};
+use core::sync::atomic::{AtomicBool, AtomicU32, AtomicI32, Ordering};
 use core::{cmp, hash, fmt, mem};
 use core::ops::Deref;
 use core::convert::Infallible;
@@ -64,6 +64,20 @@ pub trait CustomMessageHandler: wire::CustomMessageReader {
        /// in the process. Each message is paired with the node id of the intended recipient. If no
        /// connection to the node exists, then the message is simply not sent.
        fn get_and_clear_pending_msg(&self) -> Vec<(PublicKey, Self::CustomMessage)>;
+
+       /// Gets the node feature flags which this handler itself supports. All available handlers are
+       /// queried similarly and their feature flags are OR'd together to form the [`NodeFeatures`]
+       /// which are broadcasted in our [`NodeAnnouncement`] message.
+       ///
+       /// [`NodeAnnouncement`]: crate::ln::msgs::NodeAnnouncement
+       fn provided_node_features(&self) -> NodeFeatures;
+
+       /// Gets the init feature flags which should be sent to the given peer. All available handlers
+       /// are queried similarly and their feature flags are OR'd together to form the [`InitFeatures`]
+       /// which are sent in our [`Init`] message.
+       ///
+       /// [`Init`]: crate::ln::msgs::Init
+       fn provided_init_features(&self, their_node_id: &PublicKey) -> InitFeatures;
 }
 
 /// A dummy struct which implements `RoutingMessageHandler` without storing any routing information
@@ -149,6 +163,12 @@ impl CustomMessageHandler for IgnoringMessageHandler {
        }
 
        fn get_and_clear_pending_msg(&self) -> Vec<(PublicKey, Self::CustomMessage)> { Vec::new() }
+
+       fn provided_node_features(&self) -> NodeFeatures { NodeFeatures::empty() }
+
+       fn provided_init_features(&self, _their_node_id: &PublicKey) -> InitFeatures {
+               InitFeatures::empty()
+       }
 }
 
 /// A dummy struct which implements `ChannelMessageHandler` without having any channels.
@@ -676,15 +696,18 @@ pub struct PeerManager<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: D
        /// lock held. Entries may be added with only the `peers` read lock held (though the
        /// `Descriptor` value must already exist in `peers`).
        node_id_to_descriptor: Mutex<HashMap<PublicKey, Descriptor>>,
-       /// We can only have one thread processing events at once, but we don't usually need the full
-       /// `peers` write lock to do so, so instead we block on this empty mutex when entering
-       /// `process_events`.
-       event_processing_lock: Mutex<()>,
-       /// Because event processing is global and always does all available work before returning,
-       /// there is no reason for us to have many event processors waiting on the lock at once.
-       /// Instead, we limit the total blocked event processors to always exactly one by setting this
-       /// when an event process call is waiting.
-       blocked_event_processors: AtomicBool,
+       /// We can only have one thread processing events at once, but if a second call to
+       /// `process_events` happens while a first call is in progress, one of the two calls needs to
+       /// start from the top to ensure any new messages are also handled.
+       ///
+       /// Because the event handler calls into user code which may block, we don't want to block a
+       /// second thread waiting for another thread to handle events which is then blocked on user
+       /// code, so we store an atomic counter here:
+       ///  * 0 indicates no event processor is running
+       ///  * 1 indicates an event processor is running
+       ///  * > 1 indicates an event processor is running but needs to start again from the top once
+       ///        it finishes as another thread tried to start processing events but returned early.
+       event_processing_state: AtomicI32,
 
        /// Used to track the last value sent in a node_announcement "timestamp" field. We ensure this
        /// value increases strictly since we don't assume access to a time source.
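The new `event_processing_state` counter replaces the lock-plus-flag scheme removed above. A standalone sketch of the pattern it describes (not the LDK implementation itself): the caller that moves the counter from 0 to 1 becomes the sole processor, later callers merely bump the counter and return, and the processor keeps looping until it can drop the counter back to 0.

    use std::sync::atomic::{AtomicI32, Ordering};

    struct EventProcessor {
        state: AtomicI32,
    }

    impl EventProcessor {
        fn process_events(&self, mut handle_all_pending: impl FnMut()) {
            if self.state.fetch_add(1, Ordering::AcqRel) > 0 {
                // Another thread is already processing; our increment tells it to go
                // around again, so we can return without blocking on user code.
                return;
            }
            loop {
                handle_all_pending();
                // If the counter still holds exactly our own token, clear it and stop.
                if self.state.compare_exchange(1, 0, Ordering::AcqRel, Ordering::Acquire).is_ok() {
                    break;
                }
                // Someone asked for another pass while we worked: collapse any queued
                // requests into a single token and run again.
                self.state.store(1, Ordering::Release);
            }
        }
    }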
@@ -854,8 +877,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                        message_handler,
                        peers: FairRwLock::new(HashMap::new()),
                        node_id_to_descriptor: Mutex::new(HashMap::new()),
-                       event_processing_lock: Mutex::new(()),
-                       blocked_event_processors: AtomicBool::new(false),
+                       event_processing_state: AtomicI32::new(0),
                        ephemeral_key_midstate,
                        peer_counter: AtomicCounter::new(),
                        gossip_processing_backlogged: AtomicBool::new(false),
@@ -895,6 +917,13 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                SecretKey::from_slice(&Sha256::from_engine(ephemeral_hash).into_inner()).expect("You broke SHA-256!")
        }
 
+       fn init_features(&self, their_node_id: &PublicKey) -> InitFeatures {
+               self.message_handler.chan_handler.provided_init_features(their_node_id)
+                       | self.message_handler.route_handler.provided_init_features(their_node_id)
+                       | self.message_handler.onion_message_handler.provided_init_features(their_node_id)
+                       | self.message_handler.custom_message_handler.provided_init_features(their_node_id)
+       }
+
        /// Indicates a new outbound connection has been established to a node with the given `node_id`
        /// and an optional remote network address.
        ///
@@ -1290,9 +1319,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
 
                                                                peer.set_their_node_id(their_node_id);
                                                                insert_node_id!();
-                                                               let features = self.message_handler.chan_handler.provided_init_features(&their_node_id)
-                                                                       .or(self.message_handler.route_handler.provided_init_features(&their_node_id))
-                                                                       .or(self.message_handler.onion_message_handler.provided_init_features(&their_node_id));
+                                                               let features = self.init_features(&their_node_id);
                                                                let resp = msgs::Init { features, remote_network_address: filter_addresses(peer.their_net_address.clone()) };
                                                                self.enqueue_message(peer, &resp);
                                                                peer.awaiting_pong_timer_tick_intervals = 0;
@@ -1304,9 +1331,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                                                                peer.pending_read_is_header = true;
                                                                peer.set_their_node_id(their_node_id);
                                                                insert_node_id!();
-                                                               let features = self.message_handler.chan_handler.provided_init_features(&their_node_id)
-                                                                       .or(self.message_handler.route_handler.provided_init_features(&their_node_id))
-                                                                       .or(self.message_handler.onion_message_handler.provided_init_features(&their_node_id));
+                                                               let features = self.init_features(&their_node_id);
                                                                let resp = msgs::Init { features, remote_network_address: filter_addresses(peer.their_net_address.clone()) };
                                                                self.enqueue_message(peer, &resp);
                                                                peer.awaiting_pong_timer_tick_intervals = 0;
@@ -1423,10 +1448,17 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
 
                // Need an Init as first message
                if let wire::Message::Init(msg) = message {
-                       if msg.features.requires_unknown_bits() {
-                               log_debug!(self.logger, "Peer features required unknown version bits");
+                       let our_features = self.init_features(&their_node_id);
+                       if msg.features.requires_unknown_bits_from(&our_features) {
+                               log_debug!(self.logger, "Peer requires features unknown to us");
                                return Err(PeerHandleError { }.into());
                        }
+
+                       if our_features.requires_unknown_bits_from(&msg.features) {
+                               log_debug!(self.logger, "We require features unknown to our peer");
+                               return Err(PeerHandleError { }.into());
+                       }
+
                        if peer_lock.their_features.is_some() {
                                return Err(PeerHandleError { }.into());
                        }
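The Init handling above now rejects a connection if either side requires feature bits the other does not understand, rather than only checking the peer's bits against a fixed notion of "known". A small usage sketch of the symmetric check, using only the method the diff itself relies on:

    use lightning::ln::features::InitFeatures;

    fn connection_viable(ours: &InitFeatures, theirs: &InitFeatures) -> bool {
        // Neither side may require a feature the other does not advertise support for.
        !theirs.requires_unknown_bits_from(ours) && !ours.requires_unknown_bits_from(theirs)
    }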
@@ -1784,356 +1816,351 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
        /// [`ChannelManager::process_pending_htlc_forwards`]: crate::ln::channelmanager::ChannelManager::process_pending_htlc_forwards
        /// [`send_data`]: SocketDescriptor::send_data
        pub fn process_events(&self) {
-               let mut _single_processor_lock = self.event_processing_lock.try_lock();
-               if _single_processor_lock.is_err() {
-                       // While we could wake the older sleeper here with a CV and make more even waiting
-                       // times, that would be a lot of overengineering for a simple "reduce total waiter
-                       // count" goal.
-                       match self.blocked_event_processors.compare_exchange(false, true, Ordering::AcqRel, Ordering::Acquire) {
-                               Err(val) => {
-                                       debug_assert!(val, "compare_exchange failed spuriously?");
-                                       return;
-                               },
-                               Ok(val) => {
-                                       debug_assert!(!val, "compare_exchange succeeded spuriously?");
-                                       // We're the only waiter, as the running process_events may have emptied the
-                                       // pending events "long" ago and there are new events for us to process, wait until
-                                       // its done and process any leftover events before returning.
-                                       _single_processor_lock = Ok(self.event_processing_lock.lock().unwrap());
-                                       self.blocked_event_processors.store(false, Ordering::Release);
-                               }
-                       }
+               if self.event_processing_state.fetch_add(1, Ordering::AcqRel) > 0 {
+                       // If we're not the first event processor to get here, just return early; the increment
+                       // we just did will be treated as "go around again" at the end.
+                       return;
                }
 
-               self.update_gossip_backlogged();
-               let flush_read_disabled = self.gossip_processing_backlog_lifted.swap(false, Ordering::Relaxed);
-
-               let mut peers_to_disconnect = HashMap::new();
-               let mut events_generated = self.message_handler.chan_handler.get_and_clear_pending_msg_events();
-               events_generated.append(&mut self.message_handler.route_handler.get_and_clear_pending_msg_events());
+               loop {
+                       self.update_gossip_backlogged();
+                       let flush_read_disabled = self.gossip_processing_backlog_lifted.swap(false, Ordering::Relaxed);
 
-               {
-                       // TODO: There are some DoS attacks here where you can flood someone's outbound send
-                       // buffer by doing things like announcing channels on another node. We should be willing to
-                       // drop optional-ish messages when send buffers get full!
+                       let mut peers_to_disconnect = HashMap::new();
+                       let mut events_generated = self.message_handler.chan_handler.get_and_clear_pending_msg_events();
+                       events_generated.append(&mut self.message_handler.route_handler.get_and_clear_pending_msg_events());
 
-                       let peers_lock = self.peers.read().unwrap();
-                       let peers = &*peers_lock;
-                       macro_rules! get_peer_for_forwarding {
-                               ($node_id: expr) => {
-                                       {
-                                               if peers_to_disconnect.get($node_id).is_some() {
-                                                       // If we've "disconnected" this peer, do not send to it.
-                                                       continue;
-                                               }
-                                               let descriptor_opt = self.node_id_to_descriptor.lock().unwrap().get($node_id).cloned();
-                                               match descriptor_opt {
-                                                       Some(descriptor) => match peers.get(&descriptor) {
-                                                               Some(peer_mutex) => {
-                                                                       let peer_lock = peer_mutex.lock().unwrap();
-                                                                       if !peer_lock.handshake_complete() {
+                       {
+                               // TODO: There are some DoS attacks here where you can flood someone's outbound send
+                               // buffer by doing things like announcing channels on another node. We should be willing to
+                               // drop optional-ish messages when send buffers get full!
+
+                               let peers_lock = self.peers.read().unwrap();
+                               let peers = &*peers_lock;
+                               macro_rules! get_peer_for_forwarding {
+                                       ($node_id: expr) => {
+                                               {
+                                                       if peers_to_disconnect.get($node_id).is_some() {
+                                                               // If we've "disconnected" this peer, do not send to it.
+                                                               continue;
+                                                       }
+                                                       let descriptor_opt = self.node_id_to_descriptor.lock().unwrap().get($node_id).cloned();
+                                                       match descriptor_opt {
+                                                               Some(descriptor) => match peers.get(&descriptor) {
+                                                                       Some(peer_mutex) => {
+                                                                               let peer_lock = peer_mutex.lock().unwrap();
+                                                                               if !peer_lock.handshake_complete() {
+                                                                                       continue;
+                                                                               }
+                                                                               peer_lock
+                                                                       },
+                                                                       None => {
+                                                                               debug_assert!(false, "Inconsistent peers set state!");
                                                                                continue;
                                                                        }
-                                                                       peer_lock
                                                                },
                                                                None => {
-                                                                       debug_assert!(false, "Inconsistent peers set state!");
                                                                        continue;
-                                                               }
-                                                       },
-                                                       None => {
-                                                               continue;
-                                                       },
+                                                               },
+                                                       }
                                                }
                                        }
                                }
-                       }
-                       for event in events_generated.drain(..) {
-                               match event {
-                                       MessageSendEvent::SendAcceptChannel { ref node_id, ref msg } => {
-                                               log_debug!(self.logger, "Handling SendAcceptChannel event in peer_handler for node {} for channel {}",
-                                                               log_pubkey!(node_id),
-                                                               log_bytes!(msg.temporary_channel_id));
-                                               self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
-                                       },
-                                       MessageSendEvent::SendAcceptChannelV2 { ref node_id, ref msg } => {
-                                               log_debug!(self.logger, "Handling SendAcceptChannelV2 event in peer_handler for node {} for channel {}",
-                                                               log_pubkey!(node_id),
-                                                               log_bytes!(msg.temporary_channel_id));
-                                               self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
-                                       },
-                                       MessageSendEvent::SendOpenChannel { ref node_id, ref msg } => {
-                                               log_debug!(self.logger, "Handling SendOpenChannel event in peer_handler for node {} for channel {}",
-                                                               log_pubkey!(node_id),
-                                                               log_bytes!(msg.temporary_channel_id));
-                                               self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
-                                       },
-                                       MessageSendEvent::SendOpenChannelV2 { ref node_id, ref msg } => {
-                                               log_debug!(self.logger, "Handling SendOpenChannelV2 event in peer_handler for node {} for channel {}",
-                                                               log_pubkey!(node_id),
-                                                               log_bytes!(msg.temporary_channel_id));
-                                               self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
-                                       },
-                                       MessageSendEvent::SendFundingCreated { ref node_id, ref msg } => {
-                                               log_debug!(self.logger, "Handling SendFundingCreated event in peer_handler for node {} for channel {} (which becomes {})",
-                                                               log_pubkey!(node_id),
-                                                               log_bytes!(msg.temporary_channel_id),
-                                                               log_funding_channel_id!(msg.funding_txid, msg.funding_output_index));
-                                               // TODO: If the peer is gone we should generate a DiscardFunding event
-                                               // indicating to the wallet that they should just throw away this funding transaction
-                                               self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
-                                       },
-                                       MessageSendEvent::SendFundingSigned { ref node_id, ref msg } => {
-                                               log_debug!(self.logger, "Handling SendFundingSigned event in peer_handler for node {} for channel {}",
-                                                               log_pubkey!(node_id),
-                                                               log_bytes!(msg.channel_id));
-                                               self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
-                                       },
-                                       MessageSendEvent::SendChannelReady { ref node_id, ref msg } => {
-                                               log_debug!(self.logger, "Handling SendChannelReady event in peer_handler for node {} for channel {}",
-                                                               log_pubkey!(node_id),
-                                                               log_bytes!(msg.channel_id));
-                                               self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
-                                       },
-                                       MessageSendEvent::SendTxAddInput { ref node_id, ref msg } => {
-                                               log_debug!(self.logger, "Handling SendTxAddInput event in peer_handler for node {} for channel {}",
-                                                               log_pubkey!(node_id),
-                                                               log_bytes!(msg.channel_id));
-                                               self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
-                                       },
-                                       MessageSendEvent::SendTxAddOutput { ref node_id, ref msg } => {
-                                               log_debug!(self.logger, "Handling SendTxAddOutput event in peer_handler for node {} for channel {}",
-                                                               log_pubkey!(node_id),
-                                                               log_bytes!(msg.channel_id));
-                                               self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
-                                       },
-                                       MessageSendEvent::SendTxRemoveInput { ref node_id, ref msg } => {
-                                               log_debug!(self.logger, "Handling SendTxRemoveInput event in peer_handler for node {} for channel {}",
-                                                               log_pubkey!(node_id),
-                                                               log_bytes!(msg.channel_id));
-                                               self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
-                                       },
-                                       MessageSendEvent::SendTxRemoveOutput { ref node_id, ref msg } => {
-                                               log_debug!(self.logger, "Handling SendTxRemoveOutput event in peer_handler for node {} for channel {}",
-                                                               log_pubkey!(node_id),
-                                                               log_bytes!(msg.channel_id));
-                                               self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
-                                       },
-                                       MessageSendEvent::SendTxComplete { ref node_id, ref msg } => {
-                                               log_debug!(self.logger, "Handling SendTxComplete event in peer_handler for node {} for channel {}",
-                                                               log_pubkey!(node_id),
-                                                               log_bytes!(msg.channel_id));
-                                               self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
-                                       },
-                                       MessageSendEvent::SendTxSignatures { ref node_id, ref msg } => {
-                                               log_debug!(self.logger, "Handling SendTxSignatures event in peer_handler for node {} for channel {}",
-                                                               log_pubkey!(node_id),
-                                                               log_bytes!(msg.channel_id));
-                                               self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
-                                       },
-                                       MessageSendEvent::SendTxInitRbf { ref node_id, ref msg } => {
-                                               log_debug!(self.logger, "Handling SendTxInitRbf event in peer_handler for node {} for channel {}",
-                                                               log_pubkey!(node_id),
-                                                               log_bytes!(msg.channel_id));
-                                               self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
-                                       },
-                                       MessageSendEvent::SendTxAckRbf { ref node_id, ref msg } => {
-                                               log_debug!(self.logger, "Handling SendTxAckRbf event in peer_handler for node {} for channel {}",
-                                                               log_pubkey!(node_id),
-                                                               log_bytes!(msg.channel_id));
-                                               self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
-                                       },
-                                       MessageSendEvent::SendTxAbort { ref node_id, ref msg } => {
-                                               log_debug!(self.logger, "Handling SendTxAbort event in peer_handler for node {} for channel {}",
-                                                               log_pubkey!(node_id),
-                                                               log_bytes!(msg.channel_id));
-                                               self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
-                                       },
-                                       MessageSendEvent::SendAnnouncementSignatures { ref node_id, ref msg } => {
-                                               log_debug!(self.logger, "Handling SendAnnouncementSignatures event in peer_handler for node {} for channel {})",
-                                                               log_pubkey!(node_id),
-                                                               log_bytes!(msg.channel_id));
-                                               self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
-                                       },
-                                       MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
-                                               log_debug!(self.logger, "Handling UpdateHTLCs event in peer_handler for node {} with {} adds, {} fulfills, {} fails for channel {}",
-                                                               log_pubkey!(node_id),
-                                                               update_add_htlcs.len(),
-                                                               update_fulfill_htlcs.len(),
-                                                               update_fail_htlcs.len(),
-                                                               log_bytes!(commitment_signed.channel_id));
-                                               let mut peer = get_peer_for_forwarding!(node_id);
-                                               for msg in update_add_htlcs {
-                                                       self.enqueue_message(&mut *peer, msg);
-                                               }
-                                               for msg in update_fulfill_htlcs {
-                                                       self.enqueue_message(&mut *peer, msg);
-                                               }
-                                               for msg in update_fail_htlcs {
-                                                       self.enqueue_message(&mut *peer, msg);
-                                               }
-                                               for msg in update_fail_malformed_htlcs {
-                                                       self.enqueue_message(&mut *peer, msg);
-                                               }
-                                               if let &Some(ref msg) = update_fee {
-                                                       self.enqueue_message(&mut *peer, msg);
-                                               }
-                                               self.enqueue_message(&mut *peer, commitment_signed);
-                                       },
-                                       MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
-                                               log_debug!(self.logger, "Handling SendRevokeAndACK event in peer_handler for node {} for channel {}",
-                                                               log_pubkey!(node_id),
-                                                               log_bytes!(msg.channel_id));
-                                               self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
-                                       },
-                                       MessageSendEvent::SendClosingSigned { ref node_id, ref msg } => {
-                                               log_debug!(self.logger, "Handling SendClosingSigned event in peer_handler for node {} for channel {}",
-                                                               log_pubkey!(node_id),
-                                                               log_bytes!(msg.channel_id));
-                                               self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
-                                       },
-                                       MessageSendEvent::SendShutdown { ref node_id, ref msg } => {
-                                               log_debug!(self.logger, "Handling Shutdown event in peer_handler for node {} for channel {}",
-                                                               log_pubkey!(node_id),
-                                                               log_bytes!(msg.channel_id));
-                                               self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
-                                       },
-                                       MessageSendEvent::SendChannelReestablish { ref node_id, ref msg } => {
-                                               log_debug!(self.logger, "Handling SendChannelReestablish event in peer_handler for node {} for channel {}",
-                                                               log_pubkey!(node_id),
-                                                               log_bytes!(msg.channel_id));
-                                               self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
-                                       },
-                                       MessageSendEvent::SendChannelAnnouncement { ref node_id, ref msg, ref update_msg } => {
-                                               log_debug!(self.logger, "Handling SendChannelAnnouncement event in peer_handler for node {} for short channel id {}",
-                                                               log_pubkey!(node_id),
-                                                               msg.contents.short_channel_id);
-                                               self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
-                                               self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), update_msg);
-                                       },
-                                       MessageSendEvent::BroadcastChannelAnnouncement { msg, update_msg } => {
-                                               log_debug!(self.logger, "Handling BroadcastChannelAnnouncement event in peer_handler for short channel id {}", msg.contents.short_channel_id);
-                                               match self.message_handler.route_handler.handle_channel_announcement(&msg) {
-                                                       Ok(_) | Err(LightningError { action: msgs::ErrorAction::IgnoreDuplicateGossip, .. }) =>
-                                                               self.forward_broadcast_msg(peers, &wire::Message::ChannelAnnouncement(msg), None),
-                                                       _ => {},
-                                               }
-                                               if let Some(msg) = update_msg {
+                               for event in events_generated.drain(..) {
+                                       match event {
+                                               MessageSendEvent::SendAcceptChannel { ref node_id, ref msg } => {
+                                                       log_debug!(self.logger, "Handling SendAcceptChannel event in peer_handler for node {} for channel {}",
+                                                                       log_pubkey!(node_id),
+                                                                       log_bytes!(msg.temporary_channel_id));
+                                                       self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
+                                               },
+                                               MessageSendEvent::SendAcceptChannelV2 { ref node_id, ref msg } => {
+                                                       log_debug!(self.logger, "Handling SendAcceptChannelV2 event in peer_handler for node {} for channel {}",
+                                                                       log_pubkey!(node_id),
+                                                                       log_bytes!(msg.temporary_channel_id));
+                                                       self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
+                                               },
+                                               MessageSendEvent::SendOpenChannel { ref node_id, ref msg } => {
+                                                       log_debug!(self.logger, "Handling SendOpenChannel event in peer_handler for node {} for channel {}",
+                                                                       log_pubkey!(node_id),
+                                                                       log_bytes!(msg.temporary_channel_id));
+                                                       self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
+                                               },
+                                               MessageSendEvent::SendOpenChannelV2 { ref node_id, ref msg } => {
+                                                       log_debug!(self.logger, "Handling SendOpenChannelV2 event in peer_handler for node {} for channel {}",
+                                                                       log_pubkey!(node_id),
+                                                                       log_bytes!(msg.temporary_channel_id));
+                                                       self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
+                                               },
+                                               MessageSendEvent::SendFundingCreated { ref node_id, ref msg } => {
+                                                       log_debug!(self.logger, "Handling SendFundingCreated event in peer_handler for node {} for channel {} (which becomes {})",
+                                                                       log_pubkey!(node_id),
+                                                                       log_bytes!(msg.temporary_channel_id),
+                                                                       log_funding_channel_id!(msg.funding_txid, msg.funding_output_index));
+                                                       // TODO: If the peer is gone we should generate a DiscardFunding event
+                                                       // indicating to the wallet that they should just throw away this funding transaction
+                                                       self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
+                                               },
+                                               MessageSendEvent::SendFundingSigned { ref node_id, ref msg } => {
+                                                       log_debug!(self.logger, "Handling SendFundingSigned event in peer_handler for node {} for channel {}",
+                                                                       log_pubkey!(node_id),
+                                                                       log_bytes!(msg.channel_id));
+                                                       self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
+                                               },
+                                               MessageSendEvent::SendChannelReady { ref node_id, ref msg } => {
+                                                       log_debug!(self.logger, "Handling SendChannelReady event in peer_handler for node {} for channel {}",
+                                                                       log_pubkey!(node_id),
+                                                                       log_bytes!(msg.channel_id));
+                                                       self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
+                                               },
+                                               MessageSendEvent::SendTxAddInput { ref node_id, ref msg } => {
+                                                       log_debug!(self.logger, "Handling SendTxAddInput event in peer_handler for node {} for channel {}",
+                                                                       log_pubkey!(node_id),
+                                                                       log_bytes!(msg.channel_id));
+                                                       self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
+                                               },
+                                               MessageSendEvent::SendTxAddOutput { ref node_id, ref msg } => {
+                                                       log_debug!(self.logger, "Handling SendTxAddOutput event in peer_handler for node {} for channel {}",
+                                                                       log_pubkey!(node_id),
+                                                                       log_bytes!(msg.channel_id));
+                                                       self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
+                                               },
+                                               MessageSendEvent::SendTxRemoveInput { ref node_id, ref msg } => {
+                                                       log_debug!(self.logger, "Handling SendTxRemoveInput event in peer_handler for node {} for channel {}",
+                                                                       log_pubkey!(node_id),
+                                                                       log_bytes!(msg.channel_id));
+                                                       self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
+                                               },
+                                               MessageSendEvent::SendTxRemoveOutput { ref node_id, ref msg } => {
+                                                       log_debug!(self.logger, "Handling SendTxRemoveOutput event in peer_handler for node {} for channel {}",
+                                                                       log_pubkey!(node_id),
+                                                                       log_bytes!(msg.channel_id));
+                                                       self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
+                                               },
+                                               MessageSendEvent::SendTxComplete { ref node_id, ref msg } => {
+                                                       log_debug!(self.logger, "Handling SendTxComplete event in peer_handler for node {} for channel {}",
+                                                                       log_pubkey!(node_id),
+                                                                       log_bytes!(msg.channel_id));
+                                                       self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
+                                               },
+                                               MessageSendEvent::SendTxSignatures { ref node_id, ref msg } => {
+                                                       log_debug!(self.logger, "Handling SendTxSignatures event in peer_handler for node {} for channel {}",
+                                                                       log_pubkey!(node_id),
+                                                                       log_bytes!(msg.channel_id));
+                                                       self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
+                                               },
+                                               MessageSendEvent::SendTxInitRbf { ref node_id, ref msg } => {
+                                                       log_debug!(self.logger, "Handling SendTxInitRbf event in peer_handler for node {} for channel {}",
+                                                                       log_pubkey!(node_id),
+                                                                       log_bytes!(msg.channel_id));
+                                                       self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
+                                               },
+                                               MessageSendEvent::SendTxAckRbf { ref node_id, ref msg } => {
+                                                       log_debug!(self.logger, "Handling SendTxAckRbf event in peer_handler for node {} for channel {}",
+                                                                       log_pubkey!(node_id),
+                                                                       log_bytes!(msg.channel_id));
+                                                       self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
+                                               },
+                                               MessageSendEvent::SendTxAbort { ref node_id, ref msg } => {
+                                                       log_debug!(self.logger, "Handling SendTxAbort event in peer_handler for node {} for channel {}",
+                                                                       log_pubkey!(node_id),
+                                                                       log_bytes!(msg.channel_id));
+                                                       self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
+                                               },
+                                               MessageSendEvent::SendAnnouncementSignatures { ref node_id, ref msg } => {
+                                                       log_debug!(self.logger, "Handling SendAnnouncementSignatures event in peer_handler for node {} for channel {}",
+                                                                       log_pubkey!(node_id),
+                                                                       log_bytes!(msg.channel_id));
+                                                       self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
+                                               },
+                                               MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
+                                                       log_debug!(self.logger, "Handling UpdateHTLCs event in peer_handler for node {} with {} adds, {} fulfills, {} fails for channel {}",
+                                                                       log_pubkey!(node_id),
+                                                                       update_add_htlcs.len(),
+                                                                       update_fulfill_htlcs.len(),
+                                                                       update_fail_htlcs.len(),
+                                                                       log_bytes!(commitment_signed.channel_id));
+                                                       let mut peer = get_peer_for_forwarding!(node_id);
+                                                       for msg in update_add_htlcs {
+                                                               self.enqueue_message(&mut *peer, msg);
+                                                       }
+                                                       for msg in update_fulfill_htlcs {
+                                                               self.enqueue_message(&mut *peer, msg);
+                                                       }
+                                                       for msg in update_fail_htlcs {
+                                                               self.enqueue_message(&mut *peer, msg);
+                                                       }
+                                                       for msg in update_fail_malformed_htlcs {
+                                                               self.enqueue_message(&mut *peer, msg);
+                                                       }
+                                                       if let &Some(ref msg) = update_fee {
+                                                               self.enqueue_message(&mut *peer, msg);
+                                                       }
+                                                       self.enqueue_message(&mut *peer, commitment_signed);
+                                               },
+                                               MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
+                                                       log_debug!(self.logger, "Handling SendRevokeAndACK event in peer_handler for node {} for channel {}",
+                                                                       log_pubkey!(node_id),
+                                                                       log_bytes!(msg.channel_id));
+                                                       self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
+                                               },
+                                               MessageSendEvent::SendClosingSigned { ref node_id, ref msg } => {
+                                                       log_debug!(self.logger, "Handling SendClosingSigned event in peer_handler for node {} for channel {}",
+                                                                       log_pubkey!(node_id),
+                                                                       log_bytes!(msg.channel_id));
+                                                       self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
+                                               },
+                                               MessageSendEvent::SendShutdown { ref node_id, ref msg } => {
+                                                       log_debug!(self.logger, "Handling Shutdown event in peer_handler for node {} for channel {}",
+                                                                       log_pubkey!(node_id),
+                                                                       log_bytes!(msg.channel_id));
+                                                       self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
+                                               },
+                                               MessageSendEvent::SendChannelReestablish { ref node_id, ref msg } => {
+                                                       log_debug!(self.logger, "Handling SendChannelReestablish event in peer_handler for node {} for channel {}",
+                                                                       log_pubkey!(node_id),
+                                                                       log_bytes!(msg.channel_id));
+                                                       self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
+                                               },
+                                               MessageSendEvent::SendChannelAnnouncement { ref node_id, ref msg, ref update_msg } => {
+                                                       log_debug!(self.logger, "Handling SendChannelAnnouncement event in peer_handler for node {} for short channel id {}",
+                                                                       log_pubkey!(node_id),
+                                                                       msg.contents.short_channel_id);
+                                                       self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
+                                                       self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), update_msg);
+                                               },
+                                               MessageSendEvent::BroadcastChannelAnnouncement { msg, update_msg } => {
+                                                       log_debug!(self.logger, "Handling BroadcastChannelAnnouncement event in peer_handler for short channel id {}", msg.contents.short_channel_id);
+                                                       match self.message_handler.route_handler.handle_channel_announcement(&msg) {
+                                                               Ok(_) | Err(LightningError { action: msgs::ErrorAction::IgnoreDuplicateGossip, .. }) =>
+                                                                       self.forward_broadcast_msg(peers, &wire::Message::ChannelAnnouncement(msg), None),
+                                                               _ => {},
+                                                       }
+                                                       if let Some(msg) = update_msg {
+                                                               match self.message_handler.route_handler.handle_channel_update(&msg) {
+                                                                       Ok(_) | Err(LightningError { action: msgs::ErrorAction::IgnoreDuplicateGossip, .. }) =>
+                                                                               self.forward_broadcast_msg(peers, &wire::Message::ChannelUpdate(msg), None),
+                                                                       _ => {},
+                                                               }
+                                                       }
+                                               },
+                                               MessageSendEvent::BroadcastChannelUpdate { msg } => {
+                                                       log_debug!(self.logger, "Handling BroadcastChannelUpdate event in peer_handler for short channel id {}", msg.contents.short_channel_id);
                                                        match self.message_handler.route_handler.handle_channel_update(&msg) {
                                                                Ok(_) | Err(LightningError { action: msgs::ErrorAction::IgnoreDuplicateGossip, .. }) =>
                                                                        self.forward_broadcast_msg(peers, &wire::Message::ChannelUpdate(msg), None),
                                                                _ => {},
                                                        }
+                                               },
+                                               MessageSendEvent::BroadcastNodeAnnouncement { msg } => {
+                                                       log_debug!(self.logger, "Handling BroadcastNodeAnnouncement event in peer_handler for node {}", msg.contents.node_id);
+                                                       match self.message_handler.route_handler.handle_node_announcement(&msg) {
+                                                               Ok(_) | Err(LightningError { action: msgs::ErrorAction::IgnoreDuplicateGossip, .. }) =>
+                                                                       self.forward_broadcast_msg(peers, &wire::Message::NodeAnnouncement(msg), None),
+                                                               _ => {},
+                                                       }
+                                               },
+                                               MessageSendEvent::SendChannelUpdate { ref node_id, ref msg } => {
+                                                       log_trace!(self.logger, "Handling SendChannelUpdate event in peer_handler for node {} for channel {}",
+                                                                       log_pubkey!(node_id), msg.contents.short_channel_id);
+                                                       self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
+                                               },
+                                               MessageSendEvent::HandleError { ref node_id, ref action } => {
+                                                       match *action {
+                                                               msgs::ErrorAction::DisconnectPeer { ref msg } => {
+                                                                       // We do not have the peers write lock, so we just store that we're
+                                                                       // about to disconnect the peer and do it after we finish
+                                                                       // processing most messages.
+                                                                       peers_to_disconnect.insert(*node_id, msg.clone());
+                                                               },
+                                                               msgs::ErrorAction::IgnoreAndLog(level) => {
+                                                                       log_given_level!(self.logger, level, "Received a HandleError event to be ignored for node {}", log_pubkey!(node_id));
+                                                               },
+                                                               msgs::ErrorAction::IgnoreDuplicateGossip => {},
+                                                               msgs::ErrorAction::IgnoreError => {
+                                                                       log_debug!(self.logger, "Received a HandleError event to be ignored for node {}", log_pubkey!(node_id));
+                                                               },
+                                                               msgs::ErrorAction::SendErrorMessage { ref msg } => {
+                                                                       log_trace!(self.logger, "Handling SendErrorMessage HandleError event in peer_handler for node {} with message {}",
+                                                                                       log_pubkey!(node_id),
+                                                                                       msg.data);
+                                                                       self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
+                                                               },
+                                                               msgs::ErrorAction::SendWarningMessage { ref msg, ref log_level } => {
+                                                                       log_given_level!(self.logger, *log_level, "Handling SendWarningMessage HandleError event in peer_handler for node {} with message {}",
+                                                                                       log_pubkey!(node_id),
+                                                                                       msg.data);
+                                                                       self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
+                                                               },
+                                                       }
+                                               },
+                                               MessageSendEvent::SendChannelRangeQuery { ref node_id, ref msg } => {
+                                                       self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
+                                               },
+                                               MessageSendEvent::SendShortIdsQuery { ref node_id, ref msg } => {
+                                                       self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                                }
-                                       },
-                                       MessageSendEvent::BroadcastChannelUpdate { msg } => {
-                                               log_debug!(self.logger, "Handling BroadcastChannelUpdate event in peer_handler for short channel id {}", msg.contents.short_channel_id);
-                                               match self.message_handler.route_handler.handle_channel_update(&msg) {
-                                                       Ok(_) | Err(LightningError { action: msgs::ErrorAction::IgnoreDuplicateGossip, .. }) =>
-                                                               self.forward_broadcast_msg(peers, &wire::Message::ChannelUpdate(msg), None),
-                                                       _ => {},
+                                               MessageSendEvent::SendReplyChannelRange { ref node_id, ref msg } => {
+                                                       log_gossip!(self.logger, "Handling SendReplyChannelRange event in peer_handler for node {} with num_scids={} first_blocknum={} number_of_blocks={}, sync_complete={}",
+                                                               log_pubkey!(node_id),
+                                                               msg.short_channel_ids.len(),
+                                                               msg.first_blocknum,
+                                                               msg.number_of_blocks,
+                                                               msg.sync_complete);
+                                                       self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                                }
-                                       },
-                                       MessageSendEvent::BroadcastNodeAnnouncement { msg } => {
-                                               log_debug!(self.logger, "Handling BroadcastNodeAnnouncement event in peer_handler for node {}", msg.contents.node_id);
-                                               match self.message_handler.route_handler.handle_node_announcement(&msg) {
-                                                       Ok(_) | Err(LightningError { action: msgs::ErrorAction::IgnoreDuplicateGossip, .. }) =>
-                                                               self.forward_broadcast_msg(peers, &wire::Message::NodeAnnouncement(msg), None),
-                                                       _ => {},
+                                               MessageSendEvent::SendGossipTimestampFilter { ref node_id, ref msg } => {
+                                                       self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                                }
-                                       },
-                                       MessageSendEvent::SendChannelUpdate { ref node_id, ref msg } => {
-                                               log_trace!(self.logger, "Handling SendChannelUpdate event in peer_handler for node {} for channel {}",
-                                                               log_pubkey!(node_id), msg.contents.short_channel_id);
-                                               self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
-                                       },
-                                       MessageSendEvent::HandleError { ref node_id, ref action } => {
-                                               match *action {
-                                                       msgs::ErrorAction::DisconnectPeer { ref msg } => {
-                                                               // We do not have the peers write lock, so we just store that we're
-                                                               // about to disconenct the peer and do it after we finish
-                                                               // processing most messages.
-                                                               peers_to_disconnect.insert(*node_id, msg.clone());
-                                                       },
-                                                       msgs::ErrorAction::IgnoreAndLog(level) => {
-                                                               log_given_level!(self.logger, level, "Received a HandleError event to be ignored for node {}", log_pubkey!(node_id));
-                                                       },
-                                                       msgs::ErrorAction::IgnoreDuplicateGossip => {},
-                                                       msgs::ErrorAction::IgnoreError => {
-                                                               log_debug!(self.logger, "Received a HandleError event to be ignored for node {}", log_pubkey!(node_id));
-                                                       },
-                                                       msgs::ErrorAction::SendErrorMessage { ref msg } => {
-                                                               log_trace!(self.logger, "Handling SendErrorMessage HandleError event in peer_handler for node {} with message {}",
-                                                                               log_pubkey!(node_id),
-                                                                               msg.data);
-                                                               self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
-                                                       },
-                                                       msgs::ErrorAction::SendWarningMessage { ref msg, ref log_level } => {
-                                                               log_given_level!(self.logger, *log_level, "Handling SendWarningMessage HandleError event in peer_handler for node {} with message {}",
-                                                                               log_pubkey!(node_id),
-                                                                               msg.data);
-                                                               self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
-                                                       },
-                                               }
-                                       },
-                                       MessageSendEvent::SendChannelRangeQuery { ref node_id, ref msg } => {
-                                               self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
-                                       },
-                                       MessageSendEvent::SendShortIdsQuery { ref node_id, ref msg } => {
-                                               self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
-                                       }
-                                       MessageSendEvent::SendReplyChannelRange { ref node_id, ref msg } => {
-                                               log_gossip!(self.logger, "Handling SendReplyChannelRange event in peer_handler for node {} with num_scids={} first_blocknum={} number_of_blocks={}, sync_complete={}",
-                                                       log_pubkey!(node_id),
-                                                       msg.short_channel_ids.len(),
-                                                       msg.first_blocknum,
-                                                       msg.number_of_blocks,
-                                                       msg.sync_complete);
-                                               self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
-                                       }
-                                       MessageSendEvent::SendGossipTimestampFilter { ref node_id, ref msg } => {
-                                               self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                        }
                                }
-                       }
 
-                       for (node_id, msg) in self.message_handler.custom_message_handler.get_and_clear_pending_msg() {
-                               if peers_to_disconnect.get(&node_id).is_some() { continue; }
-                               self.enqueue_message(&mut *get_peer_for_forwarding!(&node_id), &msg);
-                       }
+                               for (node_id, msg) in self.message_handler.custom_message_handler.get_and_clear_pending_msg() {
+                                       if peers_to_disconnect.get(&node_id).is_some() { continue; }
+                                       self.enqueue_message(&mut *get_peer_for_forwarding!(&node_id), &msg);
+                               }
 
-                       for (descriptor, peer_mutex) in peers.iter() {
-                               let mut peer = peer_mutex.lock().unwrap();
-                               if flush_read_disabled { peer.received_channel_announce_since_backlogged = false; }
-                               self.do_attempt_write_data(&mut (*descriptor).clone(), &mut *peer, flush_read_disabled);
+                               for (descriptor, peer_mutex) in peers.iter() {
+                                       let mut peer = peer_mutex.lock().unwrap();
+                                       if flush_read_disabled { peer.received_channel_announce_since_backlogged = false; }
+                                       self.do_attempt_write_data(&mut (*descriptor).clone(), &mut *peer, flush_read_disabled);
+                               }
                        }
-               }
-               if !peers_to_disconnect.is_empty() {
-                       let mut peers_lock = self.peers.write().unwrap();
-                       let peers = &mut *peers_lock;
-                       for (node_id, msg) in peers_to_disconnect.drain() {
-                               // Note that since we are holding the peers *write* lock we can
-                               // remove from node_id_to_descriptor immediately (as no other
-                               // thread can be holding the peer lock if we have the global write
-                               // lock).
-
-                               let descriptor_opt = self.node_id_to_descriptor.lock().unwrap().remove(&node_id);
-                               if let Some(mut descriptor) = descriptor_opt {
-                                       if let Some(peer_mutex) = peers.remove(&descriptor) {
-                                               let mut peer = peer_mutex.lock().unwrap();
-                                               if let Some(msg) = msg {
-                                                       log_trace!(self.logger, "Handling DisconnectPeer HandleError event in peer_handler for node {} with message {}",
-                                                                       log_pubkey!(node_id),
-                                                                       msg.data);
-                                                       self.enqueue_message(&mut *peer, &msg);
-                                                       // This isn't guaranteed to work, but if there is enough free
-                                                       // room in the send buffer, put the error message there...
-                                                       self.do_attempt_write_data(&mut descriptor, &mut *peer, false);
-                                               }
-                                               self.do_disconnect(descriptor, &*peer, "DisconnectPeer HandleError");
-                                       } else { debug_assert!(false, "Missing connection for peer"); }
+                       if !peers_to_disconnect.is_empty() {
+                               let mut peers_lock = self.peers.write().unwrap();
+                               let peers = &mut *peers_lock;
+                               for (node_id, msg) in peers_to_disconnect.drain() {
+                                       // Note that since we are holding the peers *write* lock we can
+                                       // remove from node_id_to_descriptor immediately (as no other
+                                       // thread can be holding the peer lock if we have the global write
+                                       // lock).
+
+                                       let descriptor_opt = self.node_id_to_descriptor.lock().unwrap().remove(&node_id);
+                                       if let Some(mut descriptor) = descriptor_opt {
+                                               if let Some(peer_mutex) = peers.remove(&descriptor) {
+                                                       let mut peer = peer_mutex.lock().unwrap();
+                                                       if let Some(msg) = msg {
+                                                               log_trace!(self.logger, "Handling DisconnectPeer HandleError event in peer_handler for node {} with message {}",
+                                                                               log_pubkey!(node_id),
+                                                                               msg.data);
+                                                               self.enqueue_message(&mut *peer, &msg);
+                                                               // This isn't guaranteed to work, but if there is enough free
+                                                               // room in the send buffer, put the error message there...
+                                                               self.do_attempt_write_data(&mut descriptor, &mut *peer, false);
+                                                       }
+                                                       self.do_disconnect(descriptor, &*peer, "DisconnectPeer HandleError");
+                                               } else { debug_assert!(false, "Missing connection for peer"); }
+                                       }
                                }
                        }
+
+                       if self.event_processing_state.fetch_sub(1, Ordering::AcqRel) != 1 {
+                               // If another thread incremented the state while we were running we should go
+                               // around again, but only once.
+                               self.event_processing_state.store(1, Ordering::Release);
+                               continue;
+                       }
+                       break;
                }
        }
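
The loop tail above turns process_events into a single-processor loop: concurrent callers only bump an atomic counter, and whichever thread is already inside the loop goes around one extra time on their behalf. A minimal, self-contained sketch of that pattern, with illustrative names (the early-return increment at the top of process_events is assumed from this tail and is not shown in the hunk):

use std::sync::atomic::{AtomicI32, Ordering};

struct EventProcessor {
    // 0 = idle, 1 = one thread is processing, >1 = a re-run was requested meanwhile.
    event_processing_state: AtomicI32,
}

impl EventProcessor {
    fn process_events(&self) {
        // If another thread is already in the loop, our increment asks it to make one
        // more pass and we can return without blocking.
        if self.event_processing_state.fetch_add(1, Ordering::AcqRel) > 0 {
            return;
        }
        loop {
            self.drain_events_once();
            // If nobody asked for a re-run while we worked, fetch_sub observes exactly 1
            // and we stop; otherwise collapse all requests into a single extra pass.
            if self.event_processing_state.fetch_sub(1, Ordering::AcqRel) != 1 {
                self.event_processing_state.store(1, Ordering::Release);
                continue;
            }
            break;
        }
    }

    fn drain_events_once(&self) {
        // Stand-in for fetching message events from the handlers and flushing writes.
    }
}

The test_process_events_multithreaded test added further down relies on exactly this behaviour when it asserts that a counter reset just before a call never sees more than two loop iterations.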
 
@@ -2348,8 +2375,9 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                addresses.sort_by_key(|addr| addr.get_id());
 
                let features = self.message_handler.chan_handler.provided_node_features()
-                       .or(self.message_handler.route_handler.provided_node_features())
-                       .or(self.message_handler.onion_message_handler.provided_node_features());
+                       | self.message_handler.route_handler.provided_node_features()
+                       | self.message_handler.onion_message_handler.provided_node_features()
+                       | self.message_handler.custom_message_handler.provided_node_features();
                let announcement = msgs::UnsignedNodeAnnouncement {
                        features,
                        timestamp: self.last_node_announcement_serial.fetch_add(1, Ordering::AcqRel),
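
The announced feature set is now the bitwise union of what every handler advertises, including the custom message handler. A rough sketch of that merge, assuming only that NodeFeatures is Clone and supports the | operator as the lines above imply:

use lightning::ln::features::NodeFeatures;

// Illustrative helper only: fold per-handler feature sets into the union that goes
// into the unsigned node_announcement.
fn merged_node_features(per_handler: &[NodeFeatures]) -> NodeFeatures {
    per_handler.iter().fold(NodeFeatures::empty(), |acc, f| acc | f.clone())
}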
@@ -2398,16 +2426,19 @@ fn is_gossip_msg(type_id: u16) -> bool {
 mod tests {
        use crate::sign::{NodeSigner, Recipient};
        use crate::events;
+       use crate::io;
+       use crate::ln::features::{InitFeatures, NodeFeatures};
        use crate::ln::peer_channel_encryptor::PeerChannelEncryptor;
-       use crate::ln::peer_handler::{PeerManager, MessageHandler, SocketDescriptor, IgnoringMessageHandler, filter_addresses};
+       use crate::ln::peer_handler::{CustomMessageHandler, PeerManager, MessageHandler, SocketDescriptor, IgnoringMessageHandler, filter_addresses};
        use crate::ln::{msgs, wire};
-       use crate::ln::msgs::NetAddress;
+       use crate::ln::msgs::{LightningError, NetAddress};
        use crate::util::test_utils;
 
-       use bitcoin::secp256k1::SecretKey;
+       use bitcoin::secp256k1::{PublicKey, SecretKey};
 
        use crate::prelude::*;
        use crate::sync::{Arc, Mutex};
+       use core::convert::Infallible;
        use core::sync::atomic::{AtomicBool, Ordering};
 
        #[derive(Clone)]
@@ -2440,19 +2471,51 @@ mod tests {
        struct PeerManagerCfg {
                chan_handler: test_utils::TestChannelMessageHandler,
                routing_handler: test_utils::TestRoutingMessageHandler,
+               custom_handler: TestCustomMessageHandler,
                logger: test_utils::TestLogger,
                node_signer: test_utils::TestNodeSigner,
        }
 
+       struct TestCustomMessageHandler {
+               features: InitFeatures,
+       }
+
+       impl wire::CustomMessageReader for TestCustomMessageHandler {
+               type CustomMessage = Infallible;
+               fn read<R: io::Read>(&self, _: u16, _: &mut R) -> Result<Option<Self::CustomMessage>, msgs::DecodeError> {
+                       Ok(None)
+               }
+       }
+
+       impl CustomMessageHandler for TestCustomMessageHandler {
+               fn handle_custom_message(&self, _: Infallible, _: &PublicKey) -> Result<(), LightningError> {
+                       unreachable!();
+               }
+
+               fn get_and_clear_pending_msg(&self) -> Vec<(PublicKey, Self::CustomMessage)> { Vec::new() }
+
+               fn provided_node_features(&self) -> NodeFeatures { NodeFeatures::empty() }
+
+               fn provided_init_features(&self, _: &PublicKey) -> InitFeatures {
+                       self.features.clone()
+               }
+       }
+
        fn create_peermgr_cfgs(peer_count: usize) -> Vec<PeerManagerCfg> {
                let mut cfgs = Vec::new();
                for i in 0..peer_count {
                        let node_secret = SecretKey::from_slice(&[42 + i as u8; 32]).unwrap();
+                       let features = {
+                               let mut feature_bits = vec![0u8; 33];
+                               feature_bits[32] = 0b00000001;
+                               InitFeatures::from_le_bytes(feature_bits)
+                       };
                        cfgs.push(
                                PeerManagerCfg{
                                        chan_handler: test_utils::TestChannelMessageHandler::new(),
                                        logger: test_utils::TestLogger::new(),
                                        routing_handler: test_utils::TestRoutingMessageHandler::new(),
+                                       custom_handler: TestCustomMessageHandler { features },
                                        node_signer: test_utils::TestNodeSigner::new(node_secret),
                                }
                        );
@@ -2461,13 +2524,36 @@ mod tests {
                cfgs
        }
 
-       fn create_network<'a>(peer_count: usize, cfgs: &'a Vec<PeerManagerCfg>) -> Vec<PeerManager<FileDescriptor, &'a test_utils::TestChannelMessageHandler, &'a test_utils::TestRoutingMessageHandler, IgnoringMessageHandler, &'a test_utils::TestLogger, IgnoringMessageHandler, &'a test_utils::TestNodeSigner>> {
+       fn create_incompatible_peermgr_cfgs(peer_count: usize) -> Vec<PeerManagerCfg> {
+               let mut cfgs = Vec::new();
+               for i in 0..peer_count {
+                       let node_secret = SecretKey::from_slice(&[42 + i as u8; 32]).unwrap();
+                       let features = {
+                               let mut feature_bits = vec![0u8; 33 + i + 1];
+                               feature_bits[33 + i] = 0b00000001;
+                               InitFeatures::from_le_bytes(feature_bits)
+                       };
+                       cfgs.push(
+                               PeerManagerCfg{
+                                       chan_handler: test_utils::TestChannelMessageHandler::new(),
+                                       logger: test_utils::TestLogger::new(),
+                                       routing_handler: test_utils::TestRoutingMessageHandler::new(),
+                                       custom_handler: TestCustomMessageHandler { features },
+                                       node_signer: test_utils::TestNodeSigner::new(node_secret),
+                               }
+                       );
+               }
+
+               cfgs
+       }
+
+       fn create_network<'a>(peer_count: usize, cfgs: &'a Vec<PeerManagerCfg>) -> Vec<PeerManager<FileDescriptor, &'a test_utils::TestChannelMessageHandler, &'a test_utils::TestRoutingMessageHandler, IgnoringMessageHandler, &'a test_utils::TestLogger, &'a TestCustomMessageHandler, &'a test_utils::TestNodeSigner>> {
                let mut peers = Vec::new();
                for i in 0..peer_count {
                        let ephemeral_bytes = [i as u8; 32];
                        let msg_handler = MessageHandler {
                                chan_handler: &cfgs[i].chan_handler, route_handler: &cfgs[i].routing_handler,
-                               onion_message_handler: IgnoringMessageHandler {}, custom_message_handler: IgnoringMessageHandler {}
+                               onion_message_handler: IgnoringMessageHandler {}, custom_message_handler: &cfgs[i].custom_handler
                        };
                        let peer = PeerManager::new(msg_handler, 0, &ephemeral_bytes, &cfgs[i].logger, &cfgs[i].node_signer);
                        peers.push(peer);
@@ -2476,7 +2562,7 @@ mod tests {
                peers
        }
 
-       fn establish_connection<'a>(peer_a: &PeerManager<FileDescriptor, &'a test_utils::TestChannelMessageHandler, &'a test_utils::TestRoutingMessageHandler, IgnoringMessageHandler, &'a test_utils::TestLogger, IgnoringMessageHandler, &'a test_utils::TestNodeSigner>, peer_b: &PeerManager<FileDescriptor, &'a test_utils::TestChannelMessageHandler, &'a test_utils::TestRoutingMessageHandler, IgnoringMessageHandler, &'a test_utils::TestLogger, IgnoringMessageHandler, &'a test_utils::TestNodeSigner>) -> (FileDescriptor, FileDescriptor) {
+       fn establish_connection<'a>(peer_a: &PeerManager<FileDescriptor, &'a test_utils::TestChannelMessageHandler, &'a test_utils::TestRoutingMessageHandler, IgnoringMessageHandler, &'a test_utils::TestLogger, &'a TestCustomMessageHandler, &'a test_utils::TestNodeSigner>, peer_b: &PeerManager<FileDescriptor, &'a test_utils::TestChannelMessageHandler, &'a test_utils::TestRoutingMessageHandler, IgnoringMessageHandler, &'a test_utils::TestLogger, &'a TestCustomMessageHandler, &'a test_utils::TestNodeSigner>) -> (FileDescriptor, FileDescriptor) {
                let id_a = peer_a.node_signer.get_node_id(Recipient::Node).unwrap();
                let mut fd_a = FileDescriptor {
                        fd: 1, outbound_data: Arc::new(Mutex::new(Vec::new())),
@@ -2591,6 +2677,42 @@ mod tests {
                thrd_b.join().unwrap();
        }
 
+       #[test]
+       fn test_incompatible_peers() {
+               let cfgs = create_peermgr_cfgs(2);
+               let incompatible_cfgs = create_incompatible_peermgr_cfgs(2);
+
+               let peers = create_network(2, &cfgs);
+               let incompatible_peers = create_network(2, &incompatible_cfgs);
+               let peer_pairs = [(&peers[0], &incompatible_peers[0]), (&incompatible_peers[1], &peers[1])];
+               for (peer_a, peer_b) in peer_pairs.iter() {
+                       let id_a = peer_a.node_signer.get_node_id(Recipient::Node).unwrap();
+                       let mut fd_a = FileDescriptor {
+                               fd: 1, outbound_data: Arc::new(Mutex::new(Vec::new())),
+                               disconnect: Arc::new(AtomicBool::new(false)),
+                       };
+                       let addr_a = NetAddress::IPv4{addr: [127, 0, 0, 1], port: 1000};
+                       let mut fd_b = FileDescriptor {
+                               fd: 1, outbound_data: Arc::new(Mutex::new(Vec::new())),
+                               disconnect: Arc::new(AtomicBool::new(false)),
+                       };
+                       let addr_b = NetAddress::IPv4{addr: [127, 0, 0, 1], port: 1001};
+                       let initial_data = peer_b.new_outbound_connection(id_a, fd_b.clone(), Some(addr_a.clone())).unwrap();
+                       peer_a.new_inbound_connection(fd_a.clone(), Some(addr_b.clone())).unwrap();
+                       assert_eq!(peer_a.read_event(&mut fd_a, &initial_data).unwrap(), false);
+                       peer_a.process_events();
+
+                       let a_data = fd_a.outbound_data.lock().unwrap().split_off(0);
+                       assert_eq!(peer_b.read_event(&mut fd_b, &a_data).unwrap(), false);
+
+                       peer_b.process_events();
+                       let b_data = fd_b.outbound_data.lock().unwrap().split_off(0);
+
+                       // Should fail because of unknown required features
+                       assert!(peer_a.read_event(&mut fd_a, &b_data).is_err());
+               }
+       }
+
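
The compatible and incompatible configurations differ only in where the custom feature bit lives in the byte vector handed to InitFeatures::from_le_bytes. A small illustration of the mapping these tests rely on, assuming the usual little-endian layout (bit j of byte k is feature bit 8*k + j) and the BOLT 1 rule that even bits are required; the exact check the peer handler performs against its locally advertised features is not shown here:

// Illustration only: map a (byte index, bit index) pair in a little-endian feature
// byte vector to its feature-bit number and whether it is required (even).
fn feature_bit(byte_idx: usize, bit_idx: u8) -> (usize, bool) {
    let bit = byte_idx * 8 + bit_idx as usize;
    (bit, bit % 2 == 0)
}

fn main() {
    // create_peermgr_cfgs sets byte 32, bit 0 on every peer: the same required bit 256
    // on both sides, so neither treats it as unknown and the handshake succeeds.
    assert_eq!(feature_bit(32, 0), (256, true));
    // create_incompatible_peermgr_cfgs sets byte 33 + i, bit 0: a required bit the other
    // peer never advertises, which is why read_event errors in the loop above.
    assert_eq!(feature_bit(33, 0), (264, true));
}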
        #[test]
        fn test_disconnect_peer() {
                // Simple test which builds a network of PeerManager, connects and brings them to NoiseState::Finished and
@@ -2879,4 +3001,53 @@ mod tests {
                // For (None)
                assert_eq!(filter_addresses(None), None);
        }
+
+       #[test]
+       #[cfg(feature = "std")]
+       fn test_process_events_multithreaded() {
+               use std::time::{Duration, Instant};
+               // Test that `process_events` getting called on multiple threads doesn't generate too many
+               // loop iterations.
+               // Each time `process_events` goes around the loop we call
+               // `get_and_clear_pending_msg_events`, which we count using the `TestMessageHandler`.
+               // Because the loop should go around once more after a call which fails to take the
+               // single-threaded lock, if we write zero to the counter before calling `process_events` we
+               // should never observe there having been more than 2 loop iterations.
+               // Further, because the last thread to exit will call `process_events` before returning, we
+               // should always have at least one count at the end.
+               let cfg = Arc::new(create_peermgr_cfgs(1));
+               // Until we have std::thread::scoped we have to unsafe { turn off the borrow checker }.
+               let peer = Arc::new(create_network(1, unsafe { &*(&*cfg as *const _) as &'static _ }).pop().unwrap());
+
+               let exit_flag = Arc::new(AtomicBool::new(false));
+               macro_rules! spawn_thread { () => { {
+                       let thread_cfg = Arc::clone(&cfg);
+                       let thread_peer = Arc::clone(&peer);
+                       let thread_exit = Arc::clone(&exit_flag);
+                       std::thread::spawn(move || {
+                               while !thread_exit.load(Ordering::Acquire) {
+                                       thread_cfg[0].chan_handler.message_fetch_counter.store(0, Ordering::Release);
+                                       thread_peer.process_events();
+                                       std::thread::sleep(Duration::from_micros(1));
+                               }
+                       })
+               } } }
+
+               let thread_a = spawn_thread!();
+               let thread_b = spawn_thread!();
+               let thread_c = spawn_thread!();
+
+               let start_time = Instant::now();
+               while start_time.elapsed() < Duration::from_millis(100) {
+                       let val = cfg[0].chan_handler.message_fetch_counter.load(Ordering::Acquire);
+                       assert!(val <= 2);
+                       std::thread::yield_now(); // Windows seemingly doesn't ever interrupt threads?!
+               }
+
+               exit_flag.store(true, Ordering::Release);
+               thread_a.join().unwrap();
+               thread_b.join().unwrap();
+               thread_c.join().unwrap();
+               assert!(cfg[0].chan_handler.message_fetch_counter.load(Ordering::Acquire) >= 1);
+       }
 }
index 79443b3e84261d689f1ea8854278a2ca71c92699..633eceddbd8721c19e5623cbc334bc648e57362d 100644 (file)
@@ -19,14 +19,11 @@ use crate::util::test_utils;
 use crate::util::ser::Writeable;
 use crate::util::string::UntrustedString;
 
-use bitcoin::blockdata::block::{Block, BlockHeader};
 use bitcoin::blockdata::script::Builder;
 use bitcoin::blockdata::opcodes;
 use bitcoin::secp256k1::Secp256k1;
 
 use crate::prelude::*;
-use bitcoin::hashes::Hash;
-use bitcoin::TxMerkleNode;
 
 use crate::ln::functional_test_utils::*;
 
@@ -67,7 +64,6 @@ fn do_test_onchain_htlc_reorg(local_commitment: bool, claim: bool) {
        check_added_monitors!(nodes[2], 1);
        get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
 
-       let mut header = BlockHeader { version: 0x2000_0000, prev_blockhash: nodes[2].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
        let claim_txn = if local_commitment {
                // Broadcast node 1 commitment txn to broadcast the HTLC-Timeout
                let node_1_commitment_txn = get_local_commitment_txn!(nodes[1], chan_2.2);
@@ -77,7 +73,7 @@ fn do_test_onchain_htlc_reorg(local_commitment: bool, claim: bool) {
                check_spends!(node_1_commitment_txn[1], node_1_commitment_txn[0]);
 
                // Give node 2 node 1's transactions and get its response (claiming the HTLC instead).
-               connect_block(&nodes[2], &Block { header, txdata: node_1_commitment_txn.clone() });
+               connect_block(&nodes[2], &create_dummy_block(nodes[2].best_block_hash(), 42, node_1_commitment_txn.clone()));
                check_added_monitors!(nodes[2], 1);
                check_closed_broadcast!(nodes[2], true); // We should get a BroadcastChannelUpdate (and *only* a BroadcastChannelUpdate)
                check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed);
@@ -88,8 +84,7 @@ fn do_test_onchain_htlc_reorg(local_commitment: bool, claim: bool) {
                // Make sure node 1's height is the same as the !local_commitment case
                connect_blocks(&nodes[1], 1);
                // Confirm node 1's commitment txn (and HTLC-Timeout) on node 1
-               header.prev_blockhash = nodes[1].best_block_hash();
-               connect_block(&nodes[1], &Block { header, txdata: node_1_commitment_txn.clone() });
+               connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, node_1_commitment_txn.clone()));
 
                // ...but return node 1's commitment tx in case claim is set and we're preparing to reorg
                vec![node_1_commitment_txn[0].clone(), node_2_commitment_txn[0].clone()]
@@ -125,11 +120,7 @@ fn do_test_onchain_htlc_reorg(local_commitment: bool, claim: bool) {
                // Disconnect Node 1's HTLC-Timeout which was connected above
                disconnect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
 
-               let block = Block {
-                       header: BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 },
-                       txdata: claim_txn,
-               };
-               connect_block(&nodes[1], &block);
+               connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, claim_txn));
 
                // ChannelManager only polls chain::Watch::release_pending_monitor_events when we
                // probe it for events, so we probe non-message events here (which should just be the
@@ -137,11 +128,7 @@ fn do_test_onchain_htlc_reorg(local_commitment: bool, claim: bool) {
                expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), true, true);
        } else {
                // Confirm the timeout tx and check that we fail the HTLC backwards
-               let block = Block {
-                       header: BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 },
-                       txdata: vec![],
-               };
-               connect_block(&nodes[1], &block);
+               connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, Vec::new()));
                expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
        }
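
These call sites now rely on a `create_dummy_block` helper from `functional_test_utils.rs`, which is not included in this excerpt. A hypothetical sketch of its shape, mirroring the fields the removed inline `BlockHeader` construction used (the real helper's signature may differ):

    use bitcoin::blockdata::block::{Block, BlockHeader};
    use bitcoin::hashes::Hash;
    use bitcoin::{BlockHash, Transaction, TxMerkleNode};

    // Hypothetical helper: build a block on top of `prev_blockhash` with dummy
    // proof-of-work fields, exactly as the removed inline code did.
    pub fn create_dummy_block(prev_blockhash: BlockHash, time: u32, txdata: Vec<Transaction>) -> Block {
        let header = BlockHeader {
            version: 0x2000_0000,
            prev_blockhash,
            merkle_root: TxMerkleNode::all_zeros(),
            time,
            bits: 42,
            nonce: 42,
        };
        Block { header, txdata }
    }
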
 
index c5c08cf40321185a864aee4e814d39cc62f91abd..f134bd0569e9905cf7a54b5359215c6b434ae4d0 100644 (file)
@@ -40,7 +40,7 @@ use crate::io_extras::{copy, sink};
 use crate::prelude::*;
 use core::{cmp, fmt};
 use core::convert::TryFrom;
-use crate::sync::{RwLock, RwLockReadGuard};
+use crate::sync::{RwLock, RwLockReadGuard, LockTestExt};
 #[cfg(feature = "std")]
 use core::sync::atomic::{AtomicUsize, Ordering};
 use crate::sync::Mutex;
@@ -1327,9 +1327,14 @@ impl<L: Deref> fmt::Display for NetworkGraph<L> where L::Target: Logger {
 impl<L: Deref> Eq for NetworkGraph<L> where L::Target: Logger {}
 impl<L: Deref> PartialEq for NetworkGraph<L> where L::Target: Logger {
        fn eq(&self, other: &Self) -> bool {
-               self.genesis_hash == other.genesis_hash &&
-                       *self.channels.read().unwrap() == *other.channels.read().unwrap() &&
-                       *self.nodes.read().unwrap() == *other.nodes.read().unwrap()
+               // For a total lockorder, sort by position in memory and take the inner locks in that order.
+               // (Assumes that we can't move within memory while a lock is held).
+               let ord = ((self as *const _) as usize) < ((other as *const _) as usize);
+               let a = if ord { (&self.channels, &self.nodes) } else { (&other.channels, &other.nodes) };
+               let b = if ord { (&other.channels, &other.nodes) } else { (&self.channels, &self.nodes) };
+               let (channels_a, channels_b) = (a.0.unsafe_well_ordered_double_lock_self(), b.0.unsafe_well_ordered_double_lock_self());
+               let (nodes_a, nodes_b) = (a.1.unsafe_well_ordered_double_lock_self(), b.1.unsafe_well_ordered_double_lock_self());
+               self.genesis_hash.eq(&other.genesis_hash) && channels_a.eq(&channels_b) && nodes_a.eq(&nodes_b)
        }
 }
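
The rewritten `PartialEq` takes both graphs' inner locks in a single global order (by memory address), so two threads evaluating `a == b` and `b == a` concurrently cannot deadlock. A standalone sketch of the same idea using plain `std` mutexes, assuming the two values are distinct objects:

    use std::sync::{Mutex, MutexGuard};

    // Lock `a` and `b` in address order so every caller agrees on the ordering,
    // ruling out the classic AB/BA deadlock. Callers must pass distinct objects.
    fn lock_both<'a, T>(a: &'a Mutex<T>, b: &'a Mutex<T>) -> (MutexGuard<'a, T>, MutexGuard<'a, T>) {
        if (a as *const Mutex<T> as usize) < (b as *const Mutex<T> as usize) {
            let guard_a = a.lock().unwrap();
            let guard_b = b.lock().unwrap();
            (guard_a, guard_b)
        } else {
            let guard_b = b.lock().unwrap();
            let guard_a = a.lock().unwrap();
            (guard_a, guard_b)
        }
    }

    fn eq_under_locks(a: &Mutex<Vec<u64>>, b: &Mutex<Vec<u64>>) -> bool {
        let (guard_a, guard_b) = lock_both(a, b);
        *guard_a == *guard_b
    }
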
 
@@ -3388,31 +3393,28 @@ pub(crate) mod tests {
        }
 }
 
-#[cfg(all(test, feature = "_bench_unstable"))]
-mod benches {
+#[cfg(ldk_bench)]
+pub mod benches {
        use super::*;
-
-       use test::Bencher;
        use std::io::Read;
+       use criterion::{black_box, Criterion};
 
-       #[bench]
-       fn read_network_graph(bench: &mut Bencher) {
+       pub fn read_network_graph(bench: &mut Criterion) {
                let logger = crate::util::test_utils::TestLogger::new();
                let mut d = crate::routing::router::bench_utils::get_route_file().unwrap();
                let mut v = Vec::new();
                d.read_to_end(&mut v).unwrap();
-               bench.iter(|| {
-                       let _ = NetworkGraph::read(&mut std::io::Cursor::new(&v), &logger).unwrap();
-               });
+               bench.bench_function("read_network_graph", |b| b.iter(||
+                       NetworkGraph::read(&mut std::io::Cursor::new(black_box(&v)), &logger).unwrap()
+               ));
        }
 
-       #[bench]
-       fn write_network_graph(bench: &mut Bencher) {
+       pub fn write_network_graph(bench: &mut Criterion) {
                let logger = crate::util::test_utils::TestLogger::new();
                let mut d = crate::routing::router::bench_utils::get_route_file().unwrap();
                let net_graph = NetworkGraph::read(&mut d, &logger).unwrap();
-               bench.iter(|| {
-                       let _ = net_graph.encode();
-               });
+               bench.bench_function("write_network_graph", |b| b.iter(||
+                       black_box(&net_graph).encode()
+               ));
        }
 }
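
With the `#[bench]` functions turned into plain `pub fn ...(&mut Criterion)` entry points, they can be driven from the new `bench` crate's harness (`bench/benches/bench.rs`, added by this PR but not shown in this excerpt). A sketch of how such a harness might register a few of them, assuming the standard criterion macros and that the lightning crate is built with its benchmark dependencies enabled:

    use criterion::{criterion_group, criterion_main};

    use lightning::routing::gossip::benches as gossip_benches;
    use lightning::sign::benches as sign_benches;

    // Each target has the `fn(&mut Criterion)` shape that criterion_group! expects.
    criterion_group!(benches,
        gossip_benches::read_network_graph,
        gossip_benches::write_network_graph,
        sign_benches::bench_get_secure_random_bytes,
    );
    criterion_main!(benches);
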
index b33e021ab4fc30357a1124885c45b076419aed61..ef6fef0a7eb42ea87b8d0876f0b11f99112d83cb 100644 (file)
@@ -1015,7 +1015,7 @@ struct PathBuildingHop<'a> {
        /// decrease as well. Thus, we have to explicitly track which nodes have been processed and
        /// avoid processing them again.
        was_processed: bool,
-       #[cfg(all(not(feature = "_bench_unstable"), any(test, fuzzing)))]
+       #[cfg(all(not(ldk_bench), any(test, fuzzing)))]
        // In tests, we apply further sanity checks on cases where we skip nodes we already processed
        // to ensure it is specifically in cases where the fee has gone down because of a decrease in
        // value_contribution_msat, which requires tracking it here. See comments below where it is
@@ -1036,7 +1036,7 @@ impl<'a> core::fmt::Debug for PathBuildingHop<'a> {
                        .field("path_penalty_msat", &self.path_penalty_msat)
                        .field("path_htlc_minimum_msat", &self.path_htlc_minimum_msat)
                        .field("cltv_expiry_delta", &self.candidate.cltv_expiry_delta());
-               #[cfg(all(not(feature = "_bench_unstable"), any(test, fuzzing)))]
+               #[cfg(all(not(ldk_bench), any(test, fuzzing)))]
                let debug_struct = debug_struct
                        .field("value_contribution_msat", &self.value_contribution_msat);
                debug_struct.finish()
@@ -1570,14 +1570,14 @@ where L::Target: Logger {
                                                                path_htlc_minimum_msat,
                                                                path_penalty_msat: u64::max_value(),
                                                                was_processed: false,
-                                                               #[cfg(all(not(feature = "_bench_unstable"), any(test, fuzzing)))]
+                                                               #[cfg(all(not(ldk_bench), any(test, fuzzing)))]
                                                                value_contribution_msat,
                                                        }
                                                });
 
                                                #[allow(unused_mut)] // We only use the mut in cfg(test)
                                                let mut should_process = !old_entry.was_processed;
-                                               #[cfg(all(not(feature = "_bench_unstable"), any(test, fuzzing)))]
+                                               #[cfg(all(not(ldk_bench), any(test, fuzzing)))]
                                                {
                                                        // In test/fuzzing builds, we do extra checks to make sure the skipping
                                                        // of already-seen nodes only happens in cases we expect (see below).
@@ -1648,13 +1648,13 @@ where L::Target: Logger {
                                                                old_entry.fee_msat = 0; // This value will be later filled with hop_use_fee_msat of the following channel
                                                                old_entry.path_htlc_minimum_msat = path_htlc_minimum_msat;
                                                                old_entry.path_penalty_msat = path_penalty_msat;
-                                                               #[cfg(all(not(feature = "_bench_unstable"), any(test, fuzzing)))]
+                                                               #[cfg(all(not(ldk_bench), any(test, fuzzing)))]
                                                                {
                                                                        old_entry.value_contribution_msat = value_contribution_msat;
                                                                }
                                                                did_add_update_path_to_src_node = true;
                                                        } else if old_entry.was_processed && new_cost < old_cost {
-                                                               #[cfg(all(not(feature = "_bench_unstable"), any(test, fuzzing)))]
+                                                               #[cfg(all(not(ldk_bench), any(test, fuzzing)))]
                                                                {
                                                                        // If we're skipping processing a node which was previously
                                                                        // processed even though we found another path to it with a
@@ -5791,44 +5791,26 @@ mod tests {
                println!("Using seed of {}", seed);
                seed
        }
-       #[cfg(not(feature = "no-std"))]
-       use crate::util::ser::ReadableArgs;
 
        #[test]
        #[cfg(not(feature = "no-std"))]
        fn generate_routes() {
                use crate::routing::scoring::{ProbabilisticScorer, ProbabilisticScoringFeeParameters};
 
-               let mut d = match super::bench_utils::get_route_file() {
+               let logger = ln_test_utils::TestLogger::new();
+               let graph = match super::bench_utils::read_network_graph(&logger) {
                        Ok(f) => f,
                        Err(e) => {
                                eprintln!("{}", e);
                                return;
                        },
                };
-               let logger = ln_test_utils::TestLogger::new();
-               let graph = NetworkGraph::read(&mut d, &logger).unwrap();
-               let keys_manager = ln_test_utils::TestKeysInterface::new(&[0u8; 32], Network::Testnet);
-               let random_seed_bytes = keys_manager.get_secure_random_bytes();
 
-               // First, get 100 (source, destination) pairs for which route-getting actually succeeds...
-               let mut seed = random_init_seed() as usize;
-               let nodes = graph.read_only().nodes().clone();
-               'load_endpoints: for _ in 0..10 {
-                       loop {
-                               seed = seed.overflowing_mul(0xdeadbeef).0;
-                               let src = &PublicKey::from_slice(nodes.unordered_keys().skip(seed % nodes.len()).next().unwrap().as_slice()).unwrap();
-                               seed = seed.overflowing_mul(0xdeadbeef).0;
-                               let dst = PublicKey::from_slice(nodes.unordered_keys().skip(seed % nodes.len()).next().unwrap().as_slice()).unwrap();
-                               let payment_params = PaymentParameters::from_node_id(dst, 42);
-                               let amt = seed as u64 % 200_000_000;
-                               let params = ProbabilisticScoringFeeParameters::default();
-                               let scorer = ProbabilisticScorer::new(ProbabilisticScoringDecayParameters::default(), &graph, &logger);
-                               if get_route(src, &payment_params, &graph.read_only(), None, amt, &logger, &scorer, &params, &random_seed_bytes).is_ok() {
-                                       continue 'load_endpoints;
-                               }
-                       }
-               }
+               let params = ProbabilisticScoringFeeParameters::default();
+               let mut scorer = ProbabilisticScorer::new(ProbabilisticScoringDecayParameters::default(), &graph, &logger);
+               let features = super::InvoiceFeatures::empty();
+
+               super::bench_utils::generate_test_routes(&graph, &mut scorer, &params, features, random_init_seed(), 0, 2);
        }
 
        #[test]
@@ -5836,37 +5818,41 @@ mod tests {
        fn generate_routes_mpp() {
                use crate::routing::scoring::{ProbabilisticScorer, ProbabilisticScoringFeeParameters};
 
-               let mut d = match super::bench_utils::get_route_file() {
+               let logger = ln_test_utils::TestLogger::new();
+               let graph = match super::bench_utils::read_network_graph(&logger) {
                        Ok(f) => f,
                        Err(e) => {
                                eprintln!("{}", e);
                                return;
                        },
                };
+
+               let params = ProbabilisticScoringFeeParameters::default();
+               let mut scorer = ProbabilisticScorer::new(ProbabilisticScoringDecayParameters::default(), &graph, &logger);
+               let features = channelmanager::provided_invoice_features(&UserConfig::default());
+
+               super::bench_utils::generate_test_routes(&graph, &mut scorer, &params, features, random_init_seed(), 0, 2);
+       }
+
+       #[test]
+       #[cfg(not(feature = "no-std"))]
+       fn generate_large_mpp_routes() {
+               use crate::routing::scoring::{ProbabilisticScorer, ProbabilisticScoringFeeParameters};
+
                let logger = ln_test_utils::TestLogger::new();
-               let graph = NetworkGraph::read(&mut d, &logger).unwrap();
-               let keys_manager = ln_test_utils::TestKeysInterface::new(&[0u8; 32], Network::Testnet);
-               let random_seed_bytes = keys_manager.get_secure_random_bytes();
-               let config = UserConfig::default();
+               let graph = match super::bench_utils::read_network_graph(&logger) {
+                       Ok(f) => f,
+                       Err(e) => {
+                               eprintln!("{}", e);
+                               return;
+                       },
+               };
 
-               // First, get 100 (source, destination) pairs for which route-getting actually succeeds...
-               let mut seed = random_init_seed() as usize;
-               let nodes = graph.read_only().nodes().clone();
-               'load_endpoints: for _ in 0..10 {
-                       loop {
-                               seed = seed.overflowing_mul(0xdeadbeef).0;
-                               let src = &PublicKey::from_slice(nodes.unordered_keys().skip(seed % nodes.len()).next().unwrap().as_slice()).unwrap();
-                               seed = seed.overflowing_mul(0xdeadbeef).0;
-                               let dst = PublicKey::from_slice(nodes.unordered_keys().skip(seed % nodes.len()).next().unwrap().as_slice()).unwrap();
-                               let payment_params = PaymentParameters::from_node_id(dst, 42).with_bolt11_features(channelmanager::provided_invoice_features(&config)).unwrap();
-                               let amt = seed as u64 % 200_000_000;
-                               let params = ProbabilisticScoringFeeParameters::default();
-                               let scorer = ProbabilisticScorer::new(ProbabilisticScoringDecayParameters::default(), &graph, &logger);
-                               if get_route(src, &payment_params, &graph.read_only(), None, amt, &logger, &scorer, &params, &random_seed_bytes).is_ok() {
-                                       continue 'load_endpoints;
-                               }
-                       }
-               }
+               let params = ProbabilisticScoringFeeParameters::default();
+               let mut scorer = ProbabilisticScorer::new(ProbabilisticScoringDecayParameters::default(), &graph, &logger);
+               let features = channelmanager::provided_invoice_features(&UserConfig::default());
+
+               super::bench_utils::generate_test_routes(&graph, &mut scorer, &params, features, random_init_seed(), 1_000_000, 2);
        }
 
        #[test]
@@ -6052,9 +6038,23 @@ mod tests {
        }
 }
 
-#[cfg(all(test, not(feature = "no-std")))]
+#[cfg(all(any(test, ldk_bench), not(feature = "no-std")))]
 pub(crate) mod bench_utils {
+       use super::*;
        use std::fs::File;
+
+       use bitcoin::hashes::Hash;
+       use bitcoin::secp256k1::{PublicKey, Secp256k1, SecretKey};
+
+       use crate::chain::transaction::OutPoint;
+       use crate::sign::{EntropySource, KeysManager};
+       use crate::ln::channelmanager::{self, ChannelCounterparty, ChannelDetails};
+       use crate::ln::features::InvoiceFeatures;
+       use crate::routing::gossip::NetworkGraph;
+       use crate::util::config::UserConfig;
+       use crate::util::ser::ReadableArgs;
+       use crate::util::test_utils::TestLogger;
+
        /// Tries to open a network graph file, or panics with a URL to fetch it.
        pub(crate) fn get_route_file() -> Result<std::fs::File, &'static str> {
                let res = File::open("net_graph-2023-01-18.bin") // By default we're run in RL/lightning
@@ -6068,7 +6068,18 @@ pub(crate) mod bench_utils {
                                path.pop(); // target
                                path.push("lightning");
                                path.push("net_graph-2023-01-18.bin");
-                               eprintln!("{}", path.to_str().unwrap());
+                               File::open(path)
+                       })
+                       .or_else(|_| { // Fall back to guessing based on the binary location for a subcrate
+                               // path is likely something like .../rust-lightning/bench/target/debug/deps/bench..
+                               let mut path = std::env::current_exe().unwrap();
+                               path.pop(); // bench...
+                               path.pop(); // deps
+                               path.pop(); // debug
+                               path.pop(); // target
+                               path.pop(); // bench
+                               path.push("lightning");
+                               path.push("net_graph-2023-01-18.bin");
                                File::open(path)
                        })
                .map_err(|_| "Please fetch https://bitcoin.ninja/ldk-net_graph-v0.0.113-2023-01-18.bin and place it at lightning/net_graph-2023-01-18.bin");
@@ -6077,42 +6088,18 @@ pub(crate) mod bench_utils {
                #[cfg(not(require_route_graph_test))]
                return res;
        }
-}
-
-#[cfg(all(test, feature = "_bench_unstable", not(feature = "no-std")))]
-mod benches {
-       use super::*;
-       use bitcoin::hashes::Hash;
-       use bitcoin::secp256k1::{PublicKey, Secp256k1, SecretKey};
-       use crate::chain::transaction::OutPoint;
-       use crate::sign::{EntropySource, KeysManager};
-       use crate::ln::channelmanager::{self, ChannelCounterparty, ChannelDetails};
-       use crate::ln::features::InvoiceFeatures;
-       use crate::routing::gossip::NetworkGraph;
-       use crate::routing::scoring::{FixedPenaltyScorer, ProbabilisticScorer, ProbabilisticScoringFeeParameters, ProbabilisticScoringDecayParameters};
-       use crate::util::config::UserConfig;
-       use crate::util::logger::{Logger, Record};
-       use crate::util::ser::ReadableArgs;
-
-       use test::Bencher;
-
-       struct DummyLogger {}
-       impl Logger for DummyLogger {
-               fn log(&self, _record: &Record) {}
-       }
 
-       fn read_network_graph(logger: &DummyLogger) -> NetworkGraph<&DummyLogger> {
-               let mut d = bench_utils::get_route_file().unwrap();
-               NetworkGraph::read(&mut d, logger).unwrap()
+       pub(crate) fn read_network_graph(logger: &TestLogger) -> Result<NetworkGraph<&TestLogger>, &'static str> {
+               get_route_file().map(|mut f| NetworkGraph::read(&mut f, logger).unwrap())
        }
 
-       fn payer_pubkey() -> PublicKey {
+       pub(crate) fn payer_pubkey() -> PublicKey {
                let secp_ctx = Secp256k1::new();
                PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap())
        }
 
        #[inline]
-       fn first_hop(node_id: PublicKey) -> ChannelDetails {
+       pub(crate) fn first_hop(node_id: PublicKey) -> ChannelDetails {
                ChannelDetails {
                        channel_id: [0; 32],
                        counterparty: ChannelCounterparty {
@@ -6130,11 +6117,11 @@ mod benches {
                        short_channel_id: Some(1),
                        inbound_scid_alias: None,
                        outbound_scid_alias: None,
-                       channel_value_satoshis: 10_000_000,
+                       channel_value_satoshis: 10_000_000_000,
                        user_channel_id: 0,
-                       balance_msat: 10_000_000,
-                       outbound_capacity_msat: 10_000_000,
-                       next_outbound_htlc_limit_msat: 10_000_000,
+                       balance_msat: 10_000_000_000,
+                       outbound_capacity_msat: 10_000_000_000,
+                       next_outbound_htlc_limit_msat: 10_000_000_000,
                        inbound_capacity_msat: 0,
                        unspendable_punishment_reserve: None,
                        confirmations_required: None,
@@ -6151,100 +6138,167 @@ mod benches {
                }
        }
 
-       #[bench]
-       fn generate_routes_with_zero_penalty_scorer(bench: &mut Bencher) {
-               let logger = DummyLogger {};
-               let network_graph = read_network_graph(&logger);
+       pub(crate) fn generate_test_routes<S: Score>(graph: &NetworkGraph<&TestLogger>, scorer: &mut S,
+               score_params: &S::ScoreParams, features: InvoiceFeatures, mut seed: u64,
+               starting_amount: u64, route_count: usize,
+       ) -> Vec<(ChannelDetails, PaymentParameters, u64)> {
+               let payer = payer_pubkey();
+               let keys_manager = KeysManager::new(&[0u8; 32], 42, 42);
+               let random_seed_bytes = keys_manager.get_secure_random_bytes();
+
+               let nodes = graph.read_only().nodes().clone();
+               let mut route_endpoints = Vec::new();
+               // Fetch 1.5x more routes than we need: after the scorer updates below, some of the
+               // routes we picked may no longer be routable.
+               for _ in 0..route_count * 3 / 2 {
+                       loop {
+                               seed = seed.overflowing_mul(6364136223846793005).0.overflowing_add(1).0;
+                               let src = PublicKey::from_slice(nodes.unordered_keys()
+                                       .skip((seed as usize) % nodes.len()).next().unwrap().as_slice()).unwrap();
+                               seed = seed.overflowing_mul(6364136223846793005).0.overflowing_add(1).0;
+                               let dst = PublicKey::from_slice(nodes.unordered_keys()
+                                       .skip((seed as usize) % nodes.len()).next().unwrap().as_slice()).unwrap();
+                               let params = PaymentParameters::from_node_id(dst, 42)
+                                       .with_bolt11_features(features.clone()).unwrap();
+                               let first_hop = first_hop(src);
+                               let amt = starting_amount + seed % 1_000_000;
+                               let path_exists =
+                                       get_route(&payer, &params, &graph.read_only(), Some(&[&first_hop]),
+                                               amt, &TestLogger::new(), &scorer, score_params, &random_seed_bytes).is_ok();
+                               if path_exists {
+                                       // ...and seed the scorer with success and failure data...
+                                       seed = seed.overflowing_mul(6364136223846793005).0.overflowing_add(1).0;
+                                       let mut score_amt = seed % 1_000_000_000;
+                                       loop {
+                                               // Generate fail/success paths for a wider range of potential amounts with
+                                               // MPP enabled to give us a chance to apply penalties for more potential
+                                               // routes.
+                                               let mpp_features = channelmanager::provided_invoice_features(&UserConfig::default());
+                                               let params = PaymentParameters::from_node_id(dst, 42)
+                                                       .with_bolt11_features(mpp_features).unwrap();
+
+                                               let route_res = get_route(&payer, &params, &graph.read_only(),
+                                                       Some(&[&first_hop]), score_amt, &TestLogger::new(), &scorer,
+                                                       score_params, &random_seed_bytes);
+                                               if let Ok(route) = route_res {
+                                                       for path in route.paths {
+                                                               if seed & 0x80 == 0 {
+                                                                       scorer.payment_path_successful(&path);
+                                                               } else {
+                                                                       let short_channel_id = path.hops[path.hops.len() / 2].short_channel_id;
+                                                                       scorer.payment_path_failed(&path, short_channel_id);
+                                                               }
+                                                               seed = seed.overflowing_mul(6364136223846793005).0.overflowing_add(1).0;
+                                                       }
+                                                       break;
+                                               }
+                                               // If we couldn't find a path with a higher amount, reduce and try again.
+                                               score_amt /= 100;
+                                       }
+
+                                       route_endpoints.push((first_hop, params, amt));
+                                       break;
+                               }
+                       }
+               }
+
+               // Because we've changed channel scores, it's possible we'll take different routes to the
+               // selected destinations, possibly causing us to fail because, e.g., the newly-selected path
+               // requires a too-high CLTV delta.
+               route_endpoints.retain(|(first_hop, params, amt)| {
+                       get_route(&payer, params, &graph.read_only(), Some(&[first_hop]), *amt,
+                               &TestLogger::new(), &scorer, score_params, &random_seed_bytes).is_ok()
+               });
+               route_endpoints.truncate(route_count);
+               assert_eq!(route_endpoints.len(), route_count);
+               route_endpoints
+       }
+}
+
+#[cfg(ldk_bench)]
+pub mod benches {
+       use super::*;
+       use crate::sign::{EntropySource, KeysManager};
+       use crate::ln::channelmanager;
+       use crate::ln::features::InvoiceFeatures;
+       use crate::routing::gossip::NetworkGraph;
+       use crate::routing::scoring::{FixedPenaltyScorer, ProbabilisticScorer, ProbabilisticScoringFeeParameters, ProbabilisticScoringDecayParameters};
+       use crate::util::config::UserConfig;
+       use crate::util::logger::{Logger, Record};
+       use crate::util::test_utils::TestLogger;
+
+       use criterion::Criterion;
+
+       struct DummyLogger {}
+       impl Logger for DummyLogger {
+               fn log(&self, _record: &Record) {}
+       }
+
+       pub fn generate_routes_with_zero_penalty_scorer(bench: &mut Criterion) {
+               let logger = TestLogger::new();
+               let network_graph = bench_utils::read_network_graph(&logger).unwrap();
                let scorer = FixedPenaltyScorer::with_penalty(0);
-               generate_routes(bench, &network_graph, scorer, &(), InvoiceFeatures::empty());
+               generate_routes(bench, &network_graph, scorer, &(), InvoiceFeatures::empty(), 0,
+                       "generate_routes_with_zero_penalty_scorer");
        }
 
-       #[bench]
-       fn generate_mpp_routes_with_zero_penalty_scorer(bench: &mut Bencher) {
-               let logger = DummyLogger {};
-               let network_graph = read_network_graph(&logger);
+       pub fn generate_mpp_routes_with_zero_penalty_scorer(bench: &mut Criterion) {
+               let logger = TestLogger::new();
+               let network_graph = bench_utils::read_network_graph(&logger).unwrap();
                let scorer = FixedPenaltyScorer::with_penalty(0);
-               generate_routes(bench, &network_graph, scorer, &(), channelmanager::provided_invoice_features(&UserConfig::default()));
+               generate_routes(bench, &network_graph, scorer, &(),
+                       channelmanager::provided_invoice_features(&UserConfig::default()), 0,
+                       "generate_mpp_routes_with_zero_penalty_scorer");
        }
 
-       #[bench]
-       fn generate_routes_with_probabilistic_scorer(bench: &mut Bencher) {
-               let logger = DummyLogger {};
-               let network_graph = read_network_graph(&logger);
+       pub fn generate_routes_with_probabilistic_scorer(bench: &mut Criterion) {
+               let logger = TestLogger::new();
+               let network_graph = bench_utils::read_network_graph(&logger).unwrap();
                let params = ProbabilisticScoringFeeParameters::default();
                let scorer = ProbabilisticScorer::new(ProbabilisticScoringDecayParameters::default(), &network_graph, &logger);
-               generate_routes(bench, &network_graph, scorer, &params, InvoiceFeatures::empty());
+               generate_routes(bench, &network_graph, scorer, &params, InvoiceFeatures::empty(), 0,
+                       "generate_routes_with_probabilistic_scorer");
        }
 
-       #[bench]
-       fn generate_mpp_routes_with_probabilistic_scorer(bench: &mut Bencher) {
-               let logger = DummyLogger {};
-               let network_graph = read_network_graph(&logger);
+       pub fn generate_mpp_routes_with_probabilistic_scorer(bench: &mut Criterion) {
+               let logger = TestLogger::new();
+               let network_graph = bench_utils::read_network_graph(&logger).unwrap();
                let params = ProbabilisticScoringFeeParameters::default();
                let scorer = ProbabilisticScorer::new(ProbabilisticScoringDecayParameters::default(), &network_graph, &logger);
-               generate_routes(bench, &network_graph, scorer, &params, channelmanager::provided_invoice_features(&UserConfig::default()));
+               generate_routes(bench, &network_graph, scorer, &params,
+                       channelmanager::provided_invoice_features(&UserConfig::default()), 0,
+                       "generate_mpp_routes_with_probabilistic_scorer");
+       }
+
+       pub fn generate_large_mpp_routes_with_probabilistic_scorer(bench: &mut Criterion) {
+               let logger = TestLogger::new();
+               let network_graph = bench_utils::read_network_graph(&logger).unwrap();
+               let params = ProbabilisticScoringFeeParameters::default();
+               let scorer = ProbabilisticScorer::new(ProbabilisticScoringDecayParameters::default(), &network_graph, &logger);
+               generate_routes(bench, &network_graph, scorer, &params,
+                       channelmanager::provided_invoice_features(&UserConfig::default()), 100_000_000,
+                       "generate_large_mpp_routes_with_probabilistic_scorer");
        }
 
        fn generate_routes<S: Score>(
-               bench: &mut Bencher, graph: &NetworkGraph<&DummyLogger>, mut scorer: S, score_params: &S::ScoreParams,
-               features: InvoiceFeatures
+               bench: &mut Criterion, graph: &NetworkGraph<&TestLogger>, mut scorer: S,
+               score_params: &S::ScoreParams, features: InvoiceFeatures, starting_amount: u64,
+               bench_name: &'static str,
        ) {
-               let nodes = graph.read_only().nodes().clone();
-               let payer = payer_pubkey();
+               let payer = bench_utils::payer_pubkey();
                let keys_manager = KeysManager::new(&[0u8; 32], 42, 42);
                let random_seed_bytes = keys_manager.get_secure_random_bytes();
 
                // First, get 100 (source, destination) pairs for which route-getting actually succeeds...
-               let mut routes = Vec::new();
-               let mut route_endpoints = Vec::new();
-               let mut seed: usize = 0xdeadbeef;
-               'load_endpoints: for _ in 0..150 {
-                       loop {
-                               seed *= 0xdeadbeef;
-                               let src = PublicKey::from_slice(nodes.unordered_keys().skip(seed % nodes.len()).next().unwrap().as_slice()).unwrap();
-                               seed *= 0xdeadbeef;
-                               let dst = PublicKey::from_slice(nodes.unordered_keys().skip(seed % nodes.len()).next().unwrap().as_slice()).unwrap();
-                               let params = PaymentParameters::from_node_id(dst, 42).with_bolt11_features(features.clone()).unwrap();
-                               let first_hop = first_hop(src);
-                               let amt = seed as u64 % 1_000_000;
-                               if let Ok(route) = get_route(&payer, &params, &graph.read_only(), Some(&[&first_hop]), amt, &DummyLogger{}, &scorer, score_params, &random_seed_bytes) {
-                                       routes.push(route);
-                                       route_endpoints.push((first_hop, params, amt));
-                                       continue 'load_endpoints;
-                               }
-                       }
-               }
-
-               // ...and seed the scorer with success and failure data...
-               for route in routes {
-                       let amount = route.get_total_amount();
-                       if amount < 250_000 {
-                               for path in route.paths {
-                                       scorer.payment_path_successful(&path);
-                               }
-                       } else if amount > 750_000 {
-                               for path in route.paths {
-                                       let short_channel_id = path.hops[path.hops.len() / 2].short_channel_id;
-                                       scorer.payment_path_failed(&path, short_channel_id);
-                               }
-                       }
-               }
-
-               // Because we've changed channel scores, its possible we'll take different routes to the
-               // selected destinations, possibly causing us to fail because, eg, the newly-selected path
-               // requires a too-high CLTV delta.
-               route_endpoints.retain(|(first_hop, params, amt)| {
-                       get_route(&payer, params, &graph.read_only(), Some(&[first_hop]), *amt, &DummyLogger{}, &scorer, score_params, &random_seed_bytes).is_ok()
-               });
-               route_endpoints.truncate(100);
-               assert_eq!(route_endpoints.len(), 100);
+               let route_endpoints = bench_utils::generate_test_routes(graph, &mut scorer, score_params, features, 0xdeadbeef, starting_amount, 50);
 
                // ...then benchmark finding paths between the nodes we learned.
                let mut idx = 0;
-               bench.iter(|| {
+               bench.bench_function(bench_name, |b| b.iter(|| {
                        let (first_hop, params, amt) = &route_endpoints[idx % route_endpoints.len()];
-                       assert!(get_route(&payer, params, &graph.read_only(), Some(&[first_hop]), *amt, &DummyLogger{}, &scorer, score_params, &random_seed_bytes).is_ok());
+                       assert!(get_route(&payer, params, &graph.read_only(), Some(&[first_hop]), *amt,
+                               &DummyLogger{}, &scorer, score_params, &random_seed_bytes).is_ok());
                        idx += 1;
-               });
+               }));
        }
 }
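
`generate_test_routes` above advances its seed with `overflowing_mul(6364136223846793005)` followed by `overflowing_add(1)`, i.e. a 64-bit linear congruential generator using Knuth's MMIX multiplier, so endpoint selection is deterministic for a given starting seed. A minimal standalone version of the same stepping:

    // x' = a * x + c (mod 2^64), with Knuth's MMIX multiplier and increment 1.
    fn next_seed(seed: u64) -> u64 {
        seed.overflowing_mul(6364136223846793005).0.overflowing_add(1).0
    }

    fn main() {
        let mut seed: u64 = 0xdeadbeef;
        let node_count = 1_000;
        for _ in 0..3 {
            seed = next_seed(seed);
            // Same reduction the helper uses to pick a node index.
            println!("picked node index {}", (seed as usize) % node_count);
        }
    }
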
index cd898f12b32e41ca7523f79843f4a1ceff3407be..226a2bab72b9c35d26bcac26d462a3d34bfe2a6c 100644 (file)
@@ -1628,8 +1628,8 @@ pub fn dyn_sign() {
        let _signer: Box<dyn EcdsaChannelSigner>;
 }
 
-#[cfg(all(test, feature = "_bench_unstable", not(feature = "no-std")))]
-mod benches {
+#[cfg(ldk_bench)]
+pub mod benches {
        use std::sync::{Arc, mpsc};
        use std::sync::mpsc::TryRecvError;
        use std::thread;
@@ -1638,10 +1638,9 @@ mod benches {
        use bitcoin::Network;
        use crate::sign::{EntropySource, KeysManager};
 
-       use test::Bencher;
+       use criterion::Criterion;
 
-       #[bench]
-       fn bench_get_secure_random_bytes(bench: &mut Bencher) {
+       pub fn bench_get_secure_random_bytes(bench: &mut Criterion) {
                let seed = [0u8; 32];
                let now = Duration::from_secs(genesis_block(Network::Testnet).header.time as u64);
                let keys_manager = Arc::new(KeysManager::new(&seed, now.as_secs(), now.subsec_micros()));
@@ -1667,11 +1666,8 @@ mod benches {
                        stops.push(stop_sender);
                }
 
-               bench.iter(|| {
-                       for _ in 1..100 {
-                               keys_manager.get_secure_random_bytes();
-                       }
-               });
+               bench.bench_function("get_secure_random_bytes", |b| b.iter(||
+                       keys_manager.get_secure_random_bytes()));
 
                for stop in stops {
                        let _ = stop.send(());
@@ -1680,5 +1676,4 @@ mod benches {
                        handle.join().unwrap();
                }
        }
-
 }
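
The benchmark above measures `get_secure_random_bytes` while other threads hammer the same `KeysManager`. For reference, a minimal standalone use of the same API, assuming the `std` feature is enabled:

    use std::time::{SystemTime, UNIX_EPOCH};

    use lightning::sign::{EntropySource, KeysManager};

    fn main() {
        // KeysManager takes a 32-byte seed plus a creation time used to salt derived keys.
        let seed = [0u8; 32];
        let now = SystemTime::now().duration_since(UNIX_EPOCH).unwrap();
        let keys_manager = KeysManager::new(&seed, now.as_secs(), now.subsec_nanos());
        // EntropySource::get_secure_random_bytes returns a fresh 32-byte value per call.
        let random_bytes: [u8; 32] = keys_manager.get_secure_random_bytes();
        assert_ne!(random_bytes, keys_manager.get_secure_random_bytes());
    }
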
index 1b2b9a739b8c5d3078204917f55b0fef86e87f13..348bd90274ae1632b1cec63cf98e9249d9ac7425 100644 (file)
@@ -3,7 +3,7 @@
 pub(crate) enum LockHeldState {
        HeldByThread,
        NotHeldByThread,
-       #[cfg(any(feature = "_bench_unstable", not(test)))]
+       #[cfg(any(ldk_bench, not(test)))]
        Unsupported,
 }
 
@@ -20,20 +20,20 @@ pub(crate) trait LockTestExt<'a> {
        fn unsafe_well_ordered_double_lock_self(&'a self) -> Self::ExclLock;
 }
 
-#[cfg(all(feature = "std", not(feature = "_bench_unstable"), test))]
+#[cfg(all(feature = "std", not(ldk_bench), test))]
 mod debug_sync;
-#[cfg(all(feature = "std", not(feature = "_bench_unstable"), test))]
+#[cfg(all(feature = "std", not(ldk_bench), test))]
 pub use debug_sync::*;
-#[cfg(all(feature = "std", not(feature = "_bench_unstable"), test))]
+#[cfg(all(feature = "std", not(ldk_bench), test))]
 // Note that to make debug_sync's regex work this must not contain `debug_string` in the module name
 mod test_lockorder_checks;
 
-#[cfg(all(feature = "std", any(feature = "_bench_unstable", not(test))))]
+#[cfg(all(feature = "std", any(ldk_bench, not(test))))]
 pub(crate) mod fairrwlock;
-#[cfg(all(feature = "std", any(feature = "_bench_unstable", not(test))))]
+#[cfg(all(feature = "std", any(ldk_bench, not(test))))]
 pub use {std::sync::{Arc, Mutex, Condvar, MutexGuard, RwLock, RwLockReadGuard, RwLockWriteGuard}, fairrwlock::FairRwLock};
 
-#[cfg(all(feature = "std", any(feature = "_bench_unstable", not(test))))]
+#[cfg(all(feature = "std", any(ldk_bench, not(test))))]
 mod ext_impl {
        use super::*;
        impl<'a, T: 'a> LockTestExt<'a> for Mutex<T> {
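
Throughout the tree, the `_bench_unstable` cargo feature is replaced by an `ldk_bench` cfg flag supplied via `RUSTFLAGS`, so benchmark-only code paths no longer leak into the crate's public feature set. A small illustration of the pattern; the module and function names here are made up for the example:

    // Compiled only when building with: RUSTFLAGS="--cfg=ldk_bench" cargo bench
    #[cfg(ldk_bench)]
    pub mod benches {
        use criterion::Criterion;

        pub fn example_bench(bench: &mut Criterion) {
            bench.bench_function("example_bench", |b| b.iter(|| 2u64.pow(10)));
        }
    }

    // Debug-only checks that would skew timings stay out of bench builds.
    #[cfg(all(not(ldk_bench), any(test, fuzzing)))]
    fn expensive_sanity_check() {}
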
index e51bbb9271983986650ac31fef823473ee498f0a..331ef3b1172c9492d34c39626b556e31bc2e1a64 100644 (file)
@@ -341,17 +341,20 @@ impl TestBroadcaster {
 }
 
 impl chaininterface::BroadcasterInterface for TestBroadcaster {
-       fn broadcast_transaction(&self, tx: &Transaction) {
-               let lock_time = tx.lock_time.0;
-               assert!(lock_time < 1_500_000_000);
-               if bitcoin::LockTime::from(tx.lock_time).is_block_height() && lock_time > self.blocks.lock().unwrap().last().unwrap().1 {
-                       for inp in tx.input.iter() {
-                               if inp.sequence != Sequence::MAX {
-                                       panic!("We should never broadcast a transaction before its locktime ({})!", tx.lock_time);
+       fn broadcast_transactions(&self, txs: &[&Transaction]) {
+               for tx in txs {
+                       let lock_time = tx.lock_time.0;
+                       assert!(lock_time < 1_500_000_000);
+                       if bitcoin::LockTime::from(tx.lock_time).is_block_height() && lock_time > self.blocks.lock().unwrap().last().unwrap().1 {
+                               for inp in tx.input.iter() {
+                                       if inp.sequence != Sequence::MAX {
+                                               panic!("We should never broadcast a transaction before its locktime ({})!", tx.lock_time);
+                                       }
                                }
                        }
                }
-               self.txn_broadcasted.lock().unwrap().push(tx.clone());
+               let owned_txs: Vec<Transaction> = txs.iter().map(|tx| (*tx).clone()).collect();
+               self.txn_broadcasted.lock().unwrap().extend(owned_txs);
        }
 }
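
`BroadcasterInterface::broadcast_transaction` has become the batch-oriented `broadcast_transactions`, and `TestBroadcaster` is updated to match. A minimal sketch of a custom implementor under the new signature, assuming a simple in-memory collector (a real broadcaster would relay the batch to a Bitcoin node, e.g. via `sendrawtransaction`):

    use std::sync::Mutex;

    use bitcoin::Transaction;
    use lightning::chain::chaininterface::BroadcasterInterface;

    struct CollectingBroadcaster {
        broadcasted: Mutex<Vec<Transaction>>,
    }

    impl BroadcasterInterface for CollectingBroadcaster {
        fn broadcast_transactions(&self, txs: &[&Transaction]) {
            // Clone the borrowed transactions so they outlive the call, as the
            // updated TestBroadcaster does above.
            let mut store = self.broadcasted.lock().unwrap();
            store.extend(txs.iter().map(|tx| (*tx).clone()));
        }
    }
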
 
@@ -359,6 +362,7 @@ pub struct TestChannelMessageHandler {
        pub pending_events: Mutex<Vec<events::MessageSendEvent>>,
        expected_recv_msgs: Mutex<Option<Vec<wire::Message<()>>>>,
        connected_peers: Mutex<HashSet<PublicKey>>,
+       pub message_fetch_counter: AtomicUsize,
 }
 
 impl TestChannelMessageHandler {
@@ -367,6 +371,7 @@ impl TestChannelMessageHandler {
                        pending_events: Mutex::new(Vec::new()),
                        expected_recv_msgs: Mutex::new(None),
                        connected_peers: Mutex::new(HashSet::new()),
+                       message_fetch_counter: AtomicUsize::new(0),
                }
        }
 
@@ -517,6 +522,7 @@ impl msgs::ChannelMessageHandler for TestChannelMessageHandler {
 
 impl events::MessageSendEventsProvider for TestChannelMessageHandler {
        fn get_and_clear_pending_msg_events(&self) -> Vec<events::MessageSendEvent> {
+               self.message_fetch_counter.fetch_add(1, Ordering::AcqRel);
                let mut pending_events = self.pending_events.lock().unwrap();
                let mut ret = Vec::new();
                mem::swap(&mut ret, &mut *pending_events);
@@ -738,7 +744,7 @@ impl Logger for TestLogger {
        fn log(&self, record: &Record) {
                *self.lines.lock().unwrap().entry((record.module_path.to_string(), format!("{}", record.args))).or_insert(0) += 1;
                if record.level >= self.level {
-                       #[cfg(feature = "std")]
+                       #[cfg(all(not(ldk_bench), feature = "std"))]
                        println!("{:<5} {} [{} : {}, {}] {}", record.level.to_string(), self.id, record.module_path, record.file, record.line, record.args);
                }
        }