Merge pull request #1475 from atalw/2022-04-paymentforwarded-event
author valentinewallace <valentinewallace@users.noreply.github.com>
Mon, 16 May 2022 18:21:39 +0000 (14:21 -0400)
committer GitHub <noreply@github.com>
Mon, 16 May 2022 18:21:39 +0000 (14:21 -0400)
Expose `next_channel_id` in `PaymentForwarded` event
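For context, a sketch of how a handler might consume the newly exposed field. The exact shape of `Event::PaymentForwarded` (which fields it carries, and that they are `Option`s) is assumed from the PR title and surrounding releases rather than shown in the hunks below:

```rust
use lightning::util::events::Event;

// Hypothetical event handler body; field names assumed, not authoritative.
fn log_forward(event: &Event) {
    if let Event::PaymentForwarded { prev_channel_id, next_channel_id, fee_earned_msat, .. } = event {
        // Both channel ids are assumed to be Option<[u8; 32]>, since events
        // serialized by older versions will not contain them.
        println!("forwarded {:?} -> {:?}, earning {:?} msat",
            prev_channel_id, next_channel_id, fee_earned_msat);
    }
}
```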

64 files changed:
CONTRIBUTING.md
fuzz/Cargo.toml
fuzz/README.md
fuzz/src/chanmon_consistency.rs
fuzz/src/full_stack.rs
fuzz/src/peer_crypt.rs
fuzz/src/router.rs
lightning-background-processor/Cargo.toml
lightning-background-processor/src/lib.rs
lightning-block-sync/Cargo.toml
lightning-block-sync/src/init.rs
lightning-block-sync/src/test_utils.rs
lightning-invoice/Cargo.toml
lightning-invoice/src/de.rs
lightning-invoice/src/lib.rs
lightning-invoice/src/payment.rs
lightning-invoice/src/utils.rs
lightning-invoice/tests/ser_de.rs
lightning-net-tokio/Cargo.toml
lightning-net-tokio/src/lib.rs
lightning-persister/Cargo.toml
lightning-persister/src/util.rs
lightning/Cargo.toml
lightning/src/chain/chainmonitor.rs
lightning/src/chain/channelmonitor.rs
lightning/src/chain/keysinterface.rs
lightning/src/chain/mod.rs
lightning/src/chain/onchaintx.rs
lightning/src/chain/package.rs
lightning/src/debug_sync.rs
lightning/src/lib.rs
lightning/src/ln/chan_utils.rs
lightning/src/ln/channel.rs
lightning/src/ln/channelmanager.rs
lightning/src/ln/features.rs
lightning/src/ln/functional_test_utils.rs
lightning/src/ln/functional_tests.rs
lightning/src/ln/inbound_payment.rs
lightning/src/ln/msgs.rs
lightning/src/ln/onion_route_tests.rs
lightning/src/ln/onion_utils.rs
lightning/src/ln/peer_channel_encryptor.rs
lightning/src/ln/peer_handler.rs
lightning/src/ln/priv_short_conf_tests.rs
lightning/src/ln/reorg_tests.rs
lightning/src/ln/script.rs
lightning/src/ln/shutdown_tests.rs
lightning/src/ln/wire.rs
lightning/src/routing/network_graph.rs
lightning/src/routing/router.rs
lightning/src/routing/scoring.rs
lightning/src/sync.rs
lightning/src/util/config.rs
lightning/src/util/crypto.rs
lightning/src/util/enforcing_trait_impls.rs
lightning/src/util/events.rs
lightning/src/util/fairrwlock.rs [new file with mode: 0644]
lightning/src/util/macro_logger.rs
lightning/src/util/message_signing.rs
lightning/src/util/mod.rs
lightning/src/util/persist.rs
lightning/src/util/ser.rs
lightning/src/util/test_utils.rs
lightning/src/util/transaction_utils.rs

diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index ab6ba2a410fb245c13fc1c7a6157a36448e9d07f..e2c5252dc6ff4213cddb0d2c21ee184471e2b952 100644
@@ -56,7 +56,7 @@ The codebase is maintained using the "contributor workflow" where everyone
 without exception contributes patch proposals using "pull requests". This
 facilitates social contribution, easy testing and peer review.
 
-To contribute a patch, the worflow is a as follows:
+To contribute a patch, the workflow is as follows:
 
   1. Fork Repository
   2. Create topic branch
@@ -73,7 +73,7 @@ must be given to the long term technical debt. Every new features should
 be covered by functional tests.
 
 When refactoring, structure your PR to make it easy to review and don't
-hestitate to split it into multiple small, focused PRs.
+hesitate to split it into multiple small, focused PRs.
 
 The Minimum Supported Rust Version is 1.41.1 (enforced by our GitHub Actions).
 
diff --git a/fuzz/Cargo.toml b/fuzz/Cargo.toml
index bc1f0a479f1cb074937d689b33c398c73d2f6420..88e577617b54faeab07ba0e61a213d29f726b1ba 100644
@@ -19,7 +19,7 @@ stdin_fuzz = []
 [dependencies]
 afl = { version = "0.4", optional = true }
 lightning = { path = "../lightning", features = ["regex"] }
-bitcoin = { version = "0.27", features = ["fuzztarget", "secp-lowmemory"] }
+bitcoin = { version = "0.28.1", features = ["secp-lowmemory"] }
 hex = "0.3"
 honggfuzz = { version = "0.5", optional = true }
 libfuzzer-sys = { git = "https://github.com/rust-fuzz/libfuzzer-sys.git", optional = true }
diff --git a/fuzz/README.md b/fuzz/README.md
index dfa90fc0f979aa9d5f41de43e250140183939ec9..bfc8fa5f4bfb5c6ae71b5d1262d17bb831d1ee52 100644
@@ -24,6 +24,13 @@ cargo update
 cargo install --force honggfuzz
 ```
 
+In some environments, you may want to pin the honggfuzz version to `0.5.52`:
+
+```shell
+cargo update -p honggfuzz --precise "0.5.52"
+cargo install --force honggfuzz --version "0.5.52"
+```
+
 ### Execution
 
 To run the Hongg fuzzer, do
@@ -34,9 +41,11 @@ export HFUZZ_BUILD_ARGS="--features honggfuzz_fuzz"
 export HFUZZ_RUN_ARGS="-n $CPU_COUNT --exit_upon_crash"
 
 export TARGET="msg_ping_target" # replace with the target to be fuzzed
-cargo hfuzz run $TARGET 
+cargo hfuzz run $TARGET
 ```
 
+(Or, for a prettier output, replace the last line with `cargo --color always hfuzz run $TARGET`.)
+
 To see a list of available fuzzing targets, run:
 
 ```shell
@@ -84,4 +93,38 @@ export RUSTFLAGS="--cfg=fuzzing"
 cargo test
 ```
 
+Note that if the fuzz test fails locally, moving the offending run's trace
+to the `test_cases` folder should also do the trick; simply replace the `echo $HEX |` line above
+with the following (the trace file name is of course a bit longer than in the example):
+
+```shell
+mv hfuzz_workspace/fuzz_target/SIGABRT.PC.7ffff7e21ce1.STACK.[…].fuzz ./test_cases/$TARGET/
+```
+
 This will reproduce the failing fuzz input and yield a usable stack trace.
+
+
+## How do I add a new fuzz test?
+
+1. The easiest approach is to take one of the files in `fuzz/src/`, such as 
+`process_network_graph.rs`, and duplicate it, renaming the new file to something more 
+suitable. For the sake of example, let's call the new fuzz target we're creating 
+`my_fuzzy_experiment`.
+
+2. In the newly created file `fuzz/src/my_fuzzy_experiment.rs`, run a string substitution
+of `process_network_graph` to `my_fuzzy_experiment`, such that the three methods in the
+file are `do_test`, `my_fuzzy_experiment_test`, and `my_fuzzy_experiment_run`.
+
+3. Adjust the body (not the signature!) of `do_test` as necessary for the new fuzz test.
+
+4. In `fuzz/src/bin/gen_target.sh`, add a line reading `GEN_TEST my_fuzzy_experiment` to the 
+first group of `GEN_TEST` lines (starting in line 9).
+
+5. If your test relies on a new local crate, add that crate as a dependency to `fuzz/Cargo.toml`.
+
+6. In `fuzz/src/lib.rs`, add the line `pub mod my_fuzzy_experiment;`. Additionally, if
+you added a new crate dependency, add the `extern crate […]` import line.
+
+7. Run `fuzz/src/bin/gen_target.sh`.
+
+8. There is no step eight: happy fuzzing!
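For orientation, the three functions mentioned in step 2 typically end up looking roughly like this. This is a hypothetical skeleton modeled on the existing targets in `fuzz/src/`; the generated files in that directory are authoritative:

```rust
// fuzz/src/my_fuzzy_experiment.rs -- hypothetical skeleton after the rename in step 2.
use utils::test_logger;

fn do_test(data: &[u8]) {
    // Step 3: replace this body with the logic under test.
    let _ = data;
}

pub fn my_fuzzy_experiment_test<Out: test_logger::Output>(data: &[u8], _out: Out) {
    do_test(data);
}

#[no_mangle]
pub extern "C" fn my_fuzzy_experiment_run(data: *const u8, datalen: usize) {
    do_test(unsafe { std::slice::from_raw_parts(data, datalen) });
}
```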
diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs
index 7327f37db4891ea03ecab0b5a1657f4fb3325ef2..5db658c5818195866843b1a2dc1d386507a3a633 100644
@@ -53,8 +53,8 @@ use lightning::routing::router::{Route, RouteHop};
 use utils::test_logger::{self, Output};
 use utils::test_persister::TestPersister;
 
-use bitcoin::secp256k1::key::{PublicKey,SecretKey};
-use bitcoin::secp256k1::recovery::RecoverableSignature;
+use bitcoin::secp256k1::{PublicKey,SecretKey};
+use bitcoin::secp256k1::ecdsa::RecoverableSignature;
 use bitcoin::secp256k1::Secp256k1;
 
 use std::mem;
diff --git a/fuzz/src/full_stack.rs b/fuzz/src/full_stack.rs
index 0412547a0089a150db7f8038d7c4454a8bf05b02..330124f8a8942a23585517148096a80bc984dd4b 100644
@@ -50,8 +50,8 @@ use lightning::util::ser::ReadableArgs;
 use utils::test_logger;
 use utils::test_persister::TestPersister;
 
-use bitcoin::secp256k1::key::{PublicKey,SecretKey};
-use bitcoin::secp256k1::recovery::RecoverableSignature;
+use bitcoin::secp256k1::{PublicKey,SecretKey};
+use bitcoin::secp256k1::ecdsa::RecoverableSignature;
 use bitcoin::secp256k1::Secp256k1;
 
 use std::cell::RefCell;
diff --git a/fuzz/src/peer_crypt.rs b/fuzz/src/peer_crypt.rs
index f41137fc828b02c003110d34d4f91f4f84fd51d4..9bef432982497af54dc5c0f9912e22558f79f30b 100644
@@ -9,7 +9,7 @@
 
 use lightning::ln::peer_channel_encryptor::PeerChannelEncryptor;
 
-use bitcoin::secp256k1::key::{PublicKey,SecretKey};
+use bitcoin::secp256k1::{PublicKey,SecretKey};
 
 use utils::test_logger;
 
diff --git a/fuzz/src/router.rs b/fuzz/src/router.rs
index b0f052dbc646f613a1d8c038c88c96c3604ef88e..786bfa3e589eb05dc393a807be90efc46c5febf7 100644
@@ -23,12 +23,13 @@ use lightning::util::ser::Readable;
 use lightning::routing::network_graph::{NetworkGraph, RoutingFees};
 
 use bitcoin::hashes::Hash;
-use bitcoin::secp256k1::key::PublicKey;
+use bitcoin::secp256k1::PublicKey;
 use bitcoin::network::constants::Network;
 use bitcoin::blockdata::constants::genesis_block;
 
 use utils::test_logger;
 
+use std::convert::TryInto;
 use std::collections::HashSet;
 use std::sync::Arc;
 use std::sync::atomic::{AtomicUsize, Ordering};
@@ -205,7 +206,8 @@ pub fn do_test<Out: test_logger::Output>(data: &[u8], out: Out) {
                                        count => {
                                                for _ in 0..count {
                                                        scid += 1;
-                                                       let rnid = node_pks.iter().skip(slice_to_be16(get_slice!(2))as usize % node_pks.len()).next().unwrap();
+                                                       let rnid = node_pks.iter().skip(u16::from_be_bytes(get_slice!(2).try_into().unwrap()) as usize % node_pks.len()).next().unwrap();
+                                                       let capacity = u64::from_be_bytes(get_slice!(8).try_into().unwrap());
                                                        first_hops_vec.push(ChannelDetails {
                                                                channel_id: [0; 32],
                                                                counterparty: ChannelCounterparty {
@@ -220,7 +222,7 @@ pub fn do_test<Out: test_logger::Output>(data: &[u8], out: Out) {
                                                                channel_type: None,
                                                                short_channel_id: Some(scid),
                                                                inbound_scid_alias: None,
-                                                               channel_value_satoshis: slice_to_be64(get_slice!(8)),
+                                                               channel_value_satoshis: capacity,
                                                                user_channel_id: 0, inbound_capacity_msat: 0,
                                                                unspendable_punishment_reserve: None,
                                                                confirmations_required: None,
@@ -228,7 +230,8 @@ pub fn do_test<Out: test_logger::Output>(data: &[u8], out: Out) {
                                                                is_outbound: true, is_funding_locked: true,
                                                                is_usable: true, is_public: true,
                                                                balance_msat: 0,
-                                                               outbound_capacity_msat: 0,
+                                                               outbound_capacity_msat: capacity.saturating_mul(1000),
+                                                               next_outbound_htlc_limit_msat: capacity.saturating_mul(1000),
                                                                inbound_htlc_minimum_msat: None,
                                                                inbound_htlc_maximum_msat: None,
                                                        });
diff --git a/lightning-background-processor/Cargo.toml b/lightning-background-processor/Cargo.toml
index 16ec763fb8c3a4f48f5bb3ea773e4372b12984f3..00061ee6e5e852122e490b42420db81ca53c5859 100644
@@ -14,7 +14,7 @@ all-features = true
 rustdoc-args = ["--cfg", "docsrs"]
 
 [dependencies]
-bitcoin = "0.27"
+bitcoin = "0.28.1"
 lightning = { version = "0.0.106", path = "../lightning", features = ["std"] }
 
 [dev-dependencies]
diff --git a/lightning-background-processor/src/lib.rs b/lightning-background-processor/src/lib.rs
index 6beee915b309772ae500bd61211a929e6382682d..107f65f9b74f141cdb087eded0befe59d4c9dfac 100644
@@ -18,6 +18,7 @@ use lightning::ln::channelmanager::ChannelManager;
 use lightning::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler};
 use lightning::ln::peer_handler::{CustomMessageHandler, PeerManager, SocketDescriptor};
 use lightning::routing::network_graph::{NetworkGraph, NetGraphMsgHandler};
+use lightning::routing::scoring::WriteableScore;
 use lightning::util::events::{Event, EventHandler, EventsProvider};
 use lightning::util::logger::Logger;
 use lightning::util::persist::Persister;
@@ -151,6 +152,7 @@ impl BackgroundProcessor {
        /// [`NetworkGraph`]: lightning::routing::network_graph::NetworkGraph
        /// [`NetworkGraph::write`]: lightning::routing::network_graph::NetworkGraph#impl-Writeable
        pub fn start<
+               'a,
                Signer: 'static + Sign,
                CA: 'static + Deref + Send + Sync,
                CF: 'static + Deref + Send + Sync,
@@ -171,9 +173,11 @@ impl BackgroundProcessor {
                NG: 'static + Deref<Target = NetGraphMsgHandler<G, CA, L>> + Send + Sync,
                UMH: 'static + Deref + Send + Sync,
                PM: 'static + Deref<Target = PeerManager<Descriptor, CMH, RMH, L, UMH>> + Send + Sync,
+               S: 'static + Deref<Target = SC> + Send + Sync,
+               SC: WriteableScore<'a>,
        >(
                persister: PS, event_handler: EH, chain_monitor: M, channel_manager: CM,
-               net_graph_msg_handler: Option<NG>, peer_manager: PM, logger: L
+               net_graph_msg_handler: Option<NG>, peer_manager: PM, logger: L, scorer: Option<S>
        ) -> Self
        where
                CA::Target: 'static + chain::Access,
@@ -187,7 +191,7 @@ impl BackgroundProcessor {
                CMH::Target: 'static + ChannelMessageHandler,
                RMH::Target: 'static + RoutingMessageHandler,
                UMH::Target: 'static + CustomMessageHandler,
-               PS::Target: 'static + Persister<Signer, CW, T, K, F, L>
+               PS::Target: 'static + Persister<'a, Signer, CW, T, K, F, L, SC>,
        {
                let stop_thread = Arc::new(AtomicBool::new(false));
                let stop_thread_clone = stop_thread.clone();
@@ -203,10 +207,22 @@ impl BackgroundProcessor {
                        let mut have_pruned = false;
 
                        loop {
-                               peer_manager.process_events(); // Note that this may block on ChannelManager's locking
                                channel_manager.process_pending_events(&event_handler);
                                chain_monitor.process_pending_events(&event_handler);
 
+                               // Note that the PeerManager::process_events may block on ChannelManager's locks,
+                               // hence it comes last here. When the ChannelManager finishes whatever it's doing,
+                               // we want to ensure we get into `persist_manager` as quickly as we can, especially
+                               // without running the normal event processing above and handing events to users.
+                               //
+                               // Specifically, on an *extremely* slow machine, we may see ChannelManager start
+                               // processing a message effectively at any point during this loop. In order to
+                               // minimize the time between such processing completing and persisting the updated
+                               // ChannelManager, we want to minimize methods blocking on a ChannelManager
+                               // generally, and as a fallback place such blocking only immediately before
+                               // persistence.
+                               peer_manager.process_events();
+
                                // We wait up to 100ms, but track how long it takes to detect being put to sleep,
                                // see `await_start`'s use below.
                                let await_start = Instant::now();
@@ -262,9 +278,16 @@ impl BackgroundProcessor {
                                                if let Err(e) = persister.persist_graph(handler.network_graph()) {
                                                        log_error!(logger, "Error: Failed to persist network graph, check your disk and permissions {}", e)
                                                }
-                                               last_prune_call = Instant::now();
-                                               have_pruned = true;
                                        }
+                                       if let Some(ref scorer) = scorer {
+                                               log_trace!(logger, "Persisting scorer");
+                                               if let Err(e) = persister.persist_scorer(&scorer) {
+                                                       log_error!(logger, "Error: Failed to persist scorer, check your disk and permissions {}", e)
+                                               }
+                                       }
+
+                                       last_prune_call = Instant::now();
+                                       have_pruned = true;
                                }
                        }
 
@@ -273,10 +296,16 @@ impl BackgroundProcessor {
                        // ChannelMonitor update(s) persisted without a corresponding ChannelManager update.
                        persister.persist_manager(&*channel_manager)?;
 
+                       // Persist Scorer on exit
+                       if let Some(ref scorer) = scorer {
+                               persister.persist_scorer(&scorer)?;
+                       }
+
                        // Persist NetworkGraph on exit
                        if let Some(ref handler) = net_graph_msg_handler {
                                persister.persist_graph(handler.network_graph())?;
                        }
+
                        Ok(())
                });
                Self { stop_thread: stop_thread_clone, thread_handle: Some(handle) }
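With the added `scorer` argument, callers pass an optional `WriteableScore` alongside the other handles. A hypothetical call site under the new signature, with every variable assumed to be set up as before (e.g. the scorer wrapped in an `Arc<Mutex<..>>` as in the tests below):

```rust
// Sketch only: all bindings here are assumed from the surrounding context.
let bg_processor = BackgroundProcessor::start(
    persister, event_handler, chain_monitor, channel_manager,
    Some(net_graph_msg_handler), peer_manager, logger, Some(scorer),
);
```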
@@ -335,10 +364,9 @@ mod tests {
        use bitcoin::blockdata::constants::genesis_block;
        use bitcoin::blockdata::transaction::{Transaction, TxOut};
        use bitcoin::network::constants::Network;
-       use lightning::chain::chaininterface::{BroadcasterInterface, FeeEstimator};
-       use lightning::chain::{BestBlock, Confirm, chainmonitor, self};
+       use lightning::chain::{BestBlock, Confirm, chainmonitor};
        use lightning::chain::channelmonitor::ANTI_REORG_DELAY;
-       use lightning::chain::keysinterface::{InMemorySigner, Recipient, KeysInterface, KeysManager, Sign};
+       use lightning::chain::keysinterface::{InMemorySigner, Recipient, KeysInterface, KeysManager};
        use lightning::chain::transaction::OutPoint;
        use lightning::get_event_msg;
        use lightning::ln::channelmanager::{BREAKDOWN_TIMEOUT, ChainParameters, ChannelManager, SimpleArcChannelManager};
@@ -348,18 +376,17 @@ mod tests {
        use lightning::routing::network_graph::{NetworkGraph, NetGraphMsgHandler};
        use lightning::util::config::UserConfig;
        use lightning::util::events::{Event, MessageSendEventsProvider, MessageSendEvent};
-       use lightning::util::logger::Logger;
        use lightning::util::ser::Writeable;
        use lightning::util::test_utils;
        use lightning::util::persist::KVStorePersister;
        use lightning_invoice::payment::{InvoicePayer, RetryAttempts};
        use lightning_invoice::utils::DefaultRouter;
        use lightning_persister::FilesystemPersister;
-       use std::fs::{self, File};
-       use std::ops::Deref;
+       use std::fs;
        use std::path::PathBuf;
        use std::sync::{Arc, Mutex};
        use std::time::Duration;
+       use lightning::routing::scoring::{FixedPenaltyScorer};
        use super::{BackgroundProcessor, FRESHNESS_TIMER};
 
        const EVENT_DEADLINE: u64 = 5 * FRESHNESS_TIMER;
@@ -386,6 +413,7 @@ mod tests {
                network_graph: Arc<NetworkGraph>,
                logger: Arc<test_utils::TestLogger>,
                best_block: BestBlock,
+               scorer: Arc<Mutex<FixedPenaltyScorer>>,
        }
 
        impl Drop for Node {
@@ -399,16 +427,16 @@ mod tests {
        }
 
        struct Persister {
-               data_dir: String,
                graph_error: Option<(std::io::ErrorKind, &'static str)>,
                manager_error: Option<(std::io::ErrorKind, &'static str)>,
+               scorer_error: Option<(std::io::ErrorKind, &'static str)>,
                filesystem_persister: FilesystemPersister,
        }
 
        impl Persister {
                fn new(data_dir: String) -> Self {
                        let filesystem_persister = FilesystemPersister::new(data_dir.clone());
-                       Self { data_dir, graph_error: None, manager_error: None, filesystem_persister }
+                       Self { graph_error: None, manager_error: None, scorer_error: None, filesystem_persister }
                }
 
                fn with_graph_error(self, error: std::io::ErrorKind, message: &'static str) -> Self {
@@ -418,6 +446,10 @@ mod tests {
                fn with_manager_error(self, error: std::io::ErrorKind, message: &'static str) -> Self {
                        Self { manager_error: Some((error, message)), ..self }
                }
+
+               fn with_scorer_error(self, error: std::io::ErrorKind, message: &'static str) -> Self {
+                       Self { scorer_error: Some((error, message)), ..self }
+               }
        }
 
        impl KVStorePersister for Persister {
@@ -434,6 +466,12 @@ mod tests {
                                }
                        }
 
+                       if key == "scorer" {
+                               if let Some((error, message)) = self.scorer_error {
+                                       return Err(std::io::Error::new(error, message))
+                               }
+                       }
+
                        self.filesystem_persister.persist(key, object)
                }
        }
@@ -465,7 +503,8 @@ mod tests {
                        let net_graph_msg_handler = Some(Arc::new(NetGraphMsgHandler::new(network_graph.clone(), Some(chain_source.clone()), logger.clone())));
                        let msg_handler = MessageHandler { chan_handler: Arc::new(test_utils::TestChannelMessageHandler::new()), route_handler: Arc::new(test_utils::TestRoutingMessageHandler::new() )};
                        let peer_manager = Arc::new(PeerManager::new(msg_handler, keys_manager.get_node_secret(Recipient::Node).unwrap(), &seed, logger.clone(), IgnoringMessageHandler{}));
-                       let node = Node { node: manager, net_graph_msg_handler, peer_manager, chain_monitor, persister, tx_broadcaster, network_graph, logger, best_block };
+                       let scorer = Arc::new(Mutex::new(test_utils::TestScorer::with_penalty(0)));
+                       let node = Node { node: manager, net_graph_msg_handler, peer_manager, chain_monitor, persister, tx_broadcaster, network_graph, logger, best_block, scorer };
                        nodes.push(node);
                }
 
@@ -563,7 +602,7 @@ mod tests {
                let data_dir = nodes[0].persister.get_data_dir();
                let persister = Arc::new(Persister::new(data_dir));
                let event_handler = |_: &_| {};
-               let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].net_graph_msg_handler.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone());
+               let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].net_graph_msg_handler.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
 
                macro_rules! check_persisted_data {
                        ($node: expr, $filepath: expr) => {
@@ -613,6 +652,10 @@ mod tests {
                        check_persisted_data!(network_graph, filepath.clone());
                }
 
+               // Check scorer is persisted
+               let filepath = get_full_filepath("test_background_processor_persister_0".to_string(), "scorer".to_string());
+               check_persisted_data!(nodes[0].scorer, filepath.clone());
+
                assert!(bg_processor.stop().is_ok());
        }
 
@@ -624,7 +667,7 @@ mod tests {
                let data_dir = nodes[0].persister.get_data_dir();
                let persister = Arc::new(Persister::new(data_dir));
                let event_handler = |_: &_| {};
-               let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].net_graph_msg_handler.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone());
+               let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].net_graph_msg_handler.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
                loop {
                        let log_entries = nodes[0].logger.lines.lock().unwrap();
                        let desired_log = "Calling ChannelManager's timer_tick_occurred".to_string();
@@ -647,7 +690,7 @@ mod tests {
                let data_dir = nodes[0].persister.get_data_dir();
                let persister = Arc::new(Persister::new(data_dir).with_manager_error(std::io::ErrorKind::Other, "test"));
                let event_handler = |_: &_| {};
-               let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].net_graph_msg_handler.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone());
+               let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].net_graph_msg_handler.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
                match bg_processor.join() {
                        Ok(_) => panic!("Expected error persisting manager"),
                        Err(e) => {
@@ -664,7 +707,7 @@ mod tests {
                let data_dir = nodes[0].persister.get_data_dir();
                let persister = Arc::new(Persister::new(data_dir).with_graph_error(std::io::ErrorKind::Other, "test"));
                let event_handler = |_: &_| {};
-               let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].net_graph_msg_handler.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone());
+               let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].net_graph_msg_handler.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
 
                match bg_processor.stop() {
                        Ok(_) => panic!("Expected error persisting network graph"),
@@ -675,6 +718,24 @@ mod tests {
                }
        }
 
+       #[test]
+       fn test_scorer_persist_error() {
+               // Test that if we encounter an error during scorer persistence, an error gets returned.
+               let nodes = create_nodes(2, "test_persist_scorer_error".to_string());
+               let data_dir = nodes[0].persister.get_data_dir();
+               let persister = Arc::new(Persister::new(data_dir).with_scorer_error(std::io::ErrorKind::Other, "test"));
+               let event_handler = |_: &_| {};
+               let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].net_graph_msg_handler.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
+
+               match bg_processor.stop() {
+                       Ok(_) => panic!("Expected error persisting scorer"),
+                       Err(e) => {
+                               assert_eq!(e.kind(), std::io::ErrorKind::Other);
+                               assert_eq!(e.get_ref().unwrap().to_string(), "test");
+                       },
+               }
+       }
+
        #[test]
        fn test_background_event_handling() {
                let mut nodes = create_nodes(2, "test_background_event_handling".to_string());
@@ -687,7 +748,7 @@ mod tests {
                let event_handler = move |event: &Event| {
                        sender.send(handle_funding_generation_ready!(event, channel_value)).unwrap();
                };
-               let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].net_graph_msg_handler.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone());
+               let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].net_graph_msg_handler.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
 
                // Open a channel and check that the FundingGenerationReady event was handled.
                begin_open_channel!(nodes[0], nodes[1], channel_value);
@@ -712,7 +773,7 @@ mod tests {
                let (sender, receiver) = std::sync::mpsc::sync_channel(1);
                let event_handler = move |event: &Event| sender.send(event.clone()).unwrap();
                let persister = Arc::new(Persister::new(data_dir));
-               let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].net_graph_msg_handler.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone());
+               let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].net_graph_msg_handler.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
 
                // Force close the channel and check that the SpendableOutputs event was handled.
                nodes[0].node.force_close_channel(&nodes[0].node.list_channels()[0].channel_id).unwrap();
@@ -739,11 +800,10 @@ mod tests {
                // Initiate the background processors to watch each node.
                let data_dir = nodes[0].persister.get_data_dir();
                let persister = Arc::new(Persister::new(data_dir));
-               let scorer = Arc::new(Mutex::new(test_utils::TestScorer::with_penalty(0)));
                let router = DefaultRouter::new(Arc::clone(&nodes[0].network_graph), Arc::clone(&nodes[0].logger), random_seed_bytes);
-               let invoice_payer = Arc::new(InvoicePayer::new(Arc::clone(&nodes[0].node), router, scorer, Arc::clone(&nodes[0].logger), |_: &_| {}, RetryAttempts(2)));
+               let invoice_payer = Arc::new(InvoicePayer::new(Arc::clone(&nodes[0].node), router, Arc::clone(&nodes[0].scorer), Arc::clone(&nodes[0].logger), |_: &_| {}, RetryAttempts(2)));
                let event_handler = Arc::clone(&invoice_payer);
-               let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].net_graph_msg_handler.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone());
+               let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].net_graph_msg_handler.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
                assert!(bg_processor.stop().is_ok());
        }
 }
diff --git a/lightning-block-sync/Cargo.toml b/lightning-block-sync/Cargo.toml
index 673034081588e373b6590aff9a4db111987862af..1d6e7f40accd8410ac0c7c42bb78f56665da4b7c 100644
@@ -18,7 +18,7 @@ rest-client = [ "serde", "serde_json", "chunked_transfer" ]
 rpc-client = [ "serde", "serde_json", "chunked_transfer" ]
 
 [dependencies]
-bitcoin = "0.27"
+bitcoin = "0.28.1"
 lightning = { version = "0.0.106", path = "../lightning" }
 futures = { version = "0.3" }
 tokio = { version = "1.0", features = [ "io-util", "net", "time" ], optional = true }
diff --git a/lightning-block-sync/src/init.rs b/lightning-block-sync/src/init.rs
index f5d839d21ca31931ca08199a24c00e8f14867f06..b3f745bd26e361e670e66436abb7b1dae7310979 100644
@@ -4,7 +4,7 @@
 use crate::{BlockSource, BlockSourceResult, Cache, ChainNotifier};
 use crate::poll::{ChainPoller, Validate, ValidatedBlockHeader};
 
-use bitcoin::blockdata::block::{Block, BlockHeader};
+use bitcoin::blockdata::block::BlockHeader;
 use bitcoin::hash_types::BlockHash;
 use bitcoin::network::constants::Network;
 
@@ -203,7 +203,7 @@ impl<'a, C: Cache> Cache for ReadOnlyCache<'a, C> {
 struct DynamicChainListener<'a, L: chain::Listen + ?Sized>(&'a L);
 
 impl<'a, L: chain::Listen + ?Sized> chain::Listen for DynamicChainListener<'a, L> {
-       fn block_connected(&self, _block: &Block, _height: u32) {
+       fn filtered_block_connected(&self, _header: &BlockHeader, _txdata: &chain::transaction::TransactionData, _height: u32) {
                unreachable!()
        }
 
@@ -216,10 +216,10 @@ impl<'a, L: chain::Listen + ?Sized> chain::Listen for DynamicChainListener<'a, L
 struct ChainListenerSet<'a, L: chain::Listen + ?Sized>(Vec<(u32, &'a L)>);
 
 impl<'a, L: chain::Listen + ?Sized> chain::Listen for ChainListenerSet<'a, L> {
-       fn block_connected(&self, block: &Block, height: u32) {
+       fn filtered_block_connected(&self, header: &BlockHeader, txdata: &chain::transaction::TransactionData, height: u32) {
                for (starting_height, chain_listener) in self.0.iter() {
                        if height > *starting_height {
-                               chain_listener.block_connected(block, height);
+                               chain_listener.filtered_block_connected(header, txdata, height);
                        }
                }
        }
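Downstream implementors of `chain::Listen` switch to the header-plus-matched-transactions form in the same way. A minimal sketch, assuming (as the hunks above suggest) that `block_connected` retains a default implementation which forwards to the filtered variant:

```rust
use bitcoin::blockdata::block::BlockHeader;
use lightning::chain::{self, transaction::TransactionData};

struct LoggingListener;

impl chain::Listen for LoggingListener {
    fn filtered_block_connected(&self, header: &BlockHeader, txdata: &TransactionData, height: u32) {
        // Only the header plus the transactions matching our filter arrive here.
        println!("connected {} at height {} ({} relevant txs)",
            header.block_hash(), height, txdata.len());
    }

    fn block_disconnected(&self, _header: &BlockHeader, _height: u32) {
        // Unwind any state derived from the disconnected block.
    }
}
```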
diff --git a/lightning-block-sync/src/test_utils.rs b/lightning-block-sync/src/test_utils.rs
index fe57c0c606d02d5cfbc5bf79f3bba1637904711f..baaab456b5adeb8fb25df64d6d306c011a0062dd 100644
@@ -6,6 +6,8 @@ use bitcoin::blockdata::constants::genesis_block;
 use bitcoin::hash_types::BlockHash;
 use bitcoin::network::constants::Network;
 use bitcoin::util::uint::Uint256;
+use bitcoin::util::hash::bitcoin_merkle_root;
+use bitcoin::Transaction;
 
 use lightning::chain;
 
@@ -37,16 +39,27 @@ impl Blockchain {
                        let prev_block = &self.blocks[i - 1];
                        let prev_blockhash = prev_block.block_hash();
                        let time = prev_block.header.time + height as u32;
+                       // Must have at least one transaction, because the merkle root is not defined for an empty block
+                       // and we would fail when we later checked, as of bitcoin crate 0.28.0.
+                       // Note that elsewhere in tests we assume that the merkle root of an empty block is all zeros,
+                       // but that's OK because those tests don't trigger the check.
+                       let coinbase = Transaction {
+                               version: 0,
+                               lock_time: 0,
+                               input: vec![],
+                               output: vec![]
+                       };
+                       let merkle_root = bitcoin_merkle_root(vec![coinbase.txid().as_hash()].into_iter()).unwrap();
                        self.blocks.push(Block {
                                header: BlockHeader {
                                        version: 0,
                                        prev_blockhash,
-                                       merkle_root: Default::default(),
+                                       merkle_root: merkle_root.into(),
                                        time,
                                        bits,
                                        nonce: 0,
                                },
-                               txdata: vec![],
+                               txdata: vec![coinbase],
                        });
                }
                self
@@ -166,7 +179,7 @@ impl BlockSource for Blockchain {
 pub struct NullChainListener;
 
 impl chain::Listen for NullChainListener {
-       fn block_connected(&self, _block: &Block, _height: u32) {}
+       fn filtered_block_connected(&self, _header: &BlockHeader, _txdata: &chain::transaction::TransactionData, _height: u32) {}
        fn block_disconnected(&self, _header: &BlockHeader, _height: u32) {}
 }
 
@@ -195,13 +208,13 @@ impl MockChainListener {
 }
 
 impl chain::Listen for MockChainListener {
-       fn block_connected(&self, block: &Block, height: u32) {
+       fn filtered_block_connected(&self, header: &BlockHeader, _txdata: &chain::transaction::TransactionData, height: u32) {
                match self.expected_blocks_connected.borrow_mut().pop_front() {
                        None => {
-                               panic!("Unexpected block connected: {:?}", block.block_hash());
+                               panic!("Unexpected block connected: {:?}", header.block_hash());
                        },
                        Some(expected_block) => {
-                               assert_eq!(block.block_hash(), expected_block.header.block_hash());
+                               assert_eq!(header.block_hash(), expected_block.header.block_hash());
                                assert_eq!(height, expected_block.height);
                        },
                }
diff --git a/lightning-invoice/Cargo.toml b/lightning-invoice/Cargo.toml
index 194f5f70e912627aed2123ad1a1d3aa68ea15051..f65d7ac886992dd488e7ed449486d755067d41ae 100644
@@ -20,7 +20,7 @@ std = ["bitcoin_hashes/std", "num-traits/std", "lightning/std", "bech32/std"]
 [dependencies]
 bech32 = { version = "0.8", default-features = false }
 lightning = { version = "0.0.106", path = "../lightning", default-features = false }
-secp256k1 = { version = "0.20", default-features = false, features = ["recovery", "alloc"] }
+secp256k1 = { version = "0.22", default-features = false, features = ["recovery", "alloc"] }
 num-traits = { version = "0.2.8", default-features = false }
 bitcoin_hashes = { version = "0.10", default-features = false }
 hashbrown = { version = "0.11", optional = true }
diff --git a/lightning-invoice/src/de.rs b/lightning-invoice/src/de.rs
index 5de2b038e8b2d29a25df42705d5d7d378009a7fb..e9b639c101307dccd7e229caee1303c77a0bbca6 100644
@@ -19,8 +19,8 @@ use lightning::routing::router::{RouteHint, RouteHintHop};
 use num_traits::{CheckedAdd, CheckedMul};
 
 use secp256k1;
-use secp256k1::recovery::{RecoveryId, RecoverableSignature};
-use secp256k1::key::PublicKey;
+use secp256k1::ecdsa::{RecoveryId, RecoverableSignature};
+use secp256k1::PublicKey;
 
 use super::{Invoice, Sha256, TaggedField, ExpiryTime, MinFinalCltvExpiry, Fallback, PayeePubKey, InvoiceSignature, PositiveTimestamp,
        SemanticError, PrivateRoute, ParseError, ParseOrSemanticError, Description, RawTaggedField, Currency, RawHrp, SiPrefix, RawInvoice,
@@ -967,7 +967,7 @@ mod test {
        #[test]
        fn test_payment_secret_and_features_de_and_ser() {
                use lightning::ln::features::InvoiceFeatures;
-               use secp256k1::recovery::{RecoveryId, RecoverableSignature};
+               use secp256k1::ecdsa::{RecoveryId, RecoverableSignature};
                use TaggedField::*;
                use {SiPrefix, SignedRawInvoice, InvoiceSignature, RawInvoice, RawHrp, RawDataPart,
                                 Currency, Sha256, PositiveTimestamp};
@@ -1014,7 +1014,7 @@ mod test {
        #[test]
        fn test_raw_signed_invoice_deserialization() {
                use TaggedField::*;
-               use secp256k1::recovery::{RecoveryId, RecoverableSignature};
+               use secp256k1::ecdsa::{RecoveryId, RecoverableSignature};
                use {SignedRawInvoice, InvoiceSignature, RawInvoice, RawHrp, RawDataPart, Currency, Sha256,
                         PositiveTimestamp};
 
diff --git a/lightning-invoice/src/lib.rs b/lightning-invoice/src/lib.rs
index 5784607dde937b85fa384ed65a630d7af1d3ca3a..616ea99f0fe253cf2275862b1a63e6f0db3232f2 100644
@@ -47,9 +47,9 @@ use lightning::routing::network_graph::RoutingFees;
 use lightning::routing::router::RouteHint;
 use lightning::util::invoice::construct_invoice_preimage;
 
-use secp256k1::key::PublicKey;
+use secp256k1::PublicKey;
 use secp256k1::{Message, Secp256k1};
-use secp256k1::recovery::RecoverableSignature;
+use secp256k1::ecdsa::RecoverableSignature;
 
 use core::fmt::{Display, Formatter, self};
 use core::iter::FilterMap;
@@ -163,7 +163,7 @@ pub const DEFAULT_MIN_FINAL_CLTV_EXPIRY: u64 = 18;
 /// use bitcoin_hashes::sha256;
 ///
 /// use secp256k1::Secp256k1;
-/// use secp256k1::key::SecretKey;
+/// use secp256k1::SecretKey;
 ///
 /// use lightning::ln::PaymentSecret;
 ///
@@ -191,7 +191,7 @@ pub const DEFAULT_MIN_FINAL_CLTV_EXPIRY: u64 = 18;
 ///    .current_timestamp()
 ///    .min_final_cltv_expiry(144)
 ///    .build_signed(|hash| {
-///            Secp256k1::new().sign_recoverable(hash, &private_key)
+///            Secp256k1::new().sign_ecdsa_recoverable(hash, &private_key)
 ///    })
 ///    .unwrap();
 ///
@@ -749,7 +749,7 @@ impl SignedRawInvoice {
                let hash = Message::from_slice(&self.hash[..])
                        .expect("Hash is 32 bytes long, same as MESSAGE_SIZE");
 
-               Ok(PayeePubKey(Secp256k1::new().recover(
+               Ok(PayeePubKey(Secp256k1::new().recover_ecdsa(
                        &hash,
                        &self.signature
                )?))
@@ -776,7 +776,7 @@ impl SignedRawInvoice {
                        .expect("Hash is 32 bytes long, same as MESSAGE_SIZE");
 
                let secp_context = Secp256k1::new();
-               let verification_result = secp_context.verify(
+               let verification_result = secp_context.verify_ecdsa(
                        &hash,
                        &self.signature.to_standard(),
                        pub_key
@@ -1576,8 +1576,8 @@ mod test {
        fn test_check_signature() {
                use TaggedField::*;
                use secp256k1::Secp256k1;
-               use secp256k1::recovery::{RecoveryId, RecoverableSignature};
-               use secp256k1::key::{SecretKey, PublicKey};
+               use secp256k1::ecdsa::{RecoveryId, RecoverableSignature};
+               use secp256k1::{SecretKey, PublicKey};
                use {SignedRawInvoice, InvoiceSignature, RawInvoice, RawHrp, RawDataPart, Currency, Sha256,
                         PositiveTimestamp};
 
@@ -1635,7 +1635,7 @@ mod test {
 
                let (raw_invoice, _, _) = invoice.into_parts();
                let new_signed = raw_invoice.sign::<_, ()>(|hash| {
-                       Ok(Secp256k1::new().sign_recoverable(hash, &private_key))
+                       Ok(Secp256k1::new().sign_ecdsa_recoverable(hash, &private_key))
                }).unwrap();
 
                assert!(new_signed.check_signature());
@@ -1646,7 +1646,7 @@ mod test {
                use TaggedField::*;
                use lightning::ln::features::InvoiceFeatures;
                use secp256k1::Secp256k1;
-               use secp256k1::key::SecretKey;
+               use secp256k1::SecretKey;
                use {RawInvoice, RawHrp, RawDataPart, Currency, Sha256, PositiveTimestamp, Invoice,
                         SemanticError};
 
@@ -1677,7 +1677,7 @@ mod test {
                let invoice = {
                        let mut invoice = invoice_template.clone();
                        invoice.data.tagged_fields.push(PaymentSecret(payment_secret).into());
-                       invoice.sign::<_, ()>(|hash| Ok(Secp256k1::new().sign_recoverable(hash, &private_key)))
+                       invoice.sign::<_, ()>(|hash| Ok(Secp256k1::new().sign_ecdsa_recoverable(hash, &private_key)))
                }.unwrap();
                assert_eq!(Invoice::from_signed(invoice), Err(SemanticError::InvalidFeatures));
 
@@ -1686,7 +1686,7 @@ mod test {
                        let mut invoice = invoice_template.clone();
                        invoice.data.tagged_fields.push(PaymentSecret(payment_secret).into());
                        invoice.data.tagged_fields.push(Features(InvoiceFeatures::empty()).into());
-                       invoice.sign::<_, ()>(|hash| Ok(Secp256k1::new().sign_recoverable(hash, &private_key)))
+                       invoice.sign::<_, ()>(|hash| Ok(Secp256k1::new().sign_ecdsa_recoverable(hash, &private_key)))
                }.unwrap();
                assert_eq!(Invoice::from_signed(invoice), Err(SemanticError::InvalidFeatures));
 
@@ -1695,14 +1695,14 @@ mod test {
                        let mut invoice = invoice_template.clone();
                        invoice.data.tagged_fields.push(PaymentSecret(payment_secret).into());
                        invoice.data.tagged_fields.push(Features(InvoiceFeatures::known()).into());
-                       invoice.sign::<_, ()>(|hash| Ok(Secp256k1::new().sign_recoverable(hash, &private_key)))
+                       invoice.sign::<_, ()>(|hash| Ok(Secp256k1::new().sign_ecdsa_recoverable(hash, &private_key)))
                }.unwrap();
                assert!(Invoice::from_signed(invoice).is_ok());
 
                // No payment secret or features
                let invoice = {
                        let invoice = invoice_template.clone();
-                       invoice.sign::<_, ()>(|hash| Ok(Secp256k1::new().sign_recoverable(hash, &private_key)))
+                       invoice.sign::<_, ()>(|hash| Ok(Secp256k1::new().sign_ecdsa_recoverable(hash, &private_key)))
                }.unwrap();
                assert_eq!(Invoice::from_signed(invoice), Err(SemanticError::NoPaymentSecret));
 
@@ -1710,7 +1710,7 @@ mod test {
                let invoice = {
                        let mut invoice = invoice_template.clone();
                        invoice.data.tagged_fields.push(Features(InvoiceFeatures::empty()).into());
-                       invoice.sign::<_, ()>(|hash| Ok(Secp256k1::new().sign_recoverable(hash, &private_key)))
+                       invoice.sign::<_, ()>(|hash| Ok(Secp256k1::new().sign_ecdsa_recoverable(hash, &private_key)))
                }.unwrap();
                assert_eq!(Invoice::from_signed(invoice), Err(SemanticError::NoPaymentSecret));
 
@@ -1718,7 +1718,7 @@ mod test {
                let invoice = {
                        let mut invoice = invoice_template.clone();
                        invoice.data.tagged_fields.push(Features(InvoiceFeatures::known()).into());
-                       invoice.sign::<_, ()>(|hash| Ok(Secp256k1::new().sign_recoverable(hash, &private_key)))
+                       invoice.sign::<_, ()>(|hash| Ok(Secp256k1::new().sign_ecdsa_recoverable(hash, &private_key)))
                }.unwrap();
                assert_eq!(Invoice::from_signed(invoice), Err(SemanticError::NoPaymentSecret));
 
@@ -1727,7 +1727,7 @@ mod test {
                        let mut invoice = invoice_template.clone();
                        invoice.data.tagged_fields.push(PaymentSecret(payment_secret).into());
                        invoice.data.tagged_fields.push(PaymentSecret(payment_secret).into());
-                       invoice.sign::<_, ()>(|hash| Ok(Secp256k1::new().sign_recoverable(hash, &private_key)))
+                       invoice.sign::<_, ()>(|hash| Ok(Secp256k1::new().sign_ecdsa_recoverable(hash, &private_key)))
                }.unwrap();
                assert_eq!(Invoice::from_signed(invoice), Err(SemanticError::MultiplePaymentSecrets));
        }
@@ -1764,7 +1764,7 @@ mod test {
                use ::*;
                use lightning::routing::router::RouteHintHop;
                use std::iter::FromIterator;
-               use secp256k1::key::PublicKey;
+               use secp256k1::PublicKey;
 
                let builder = InvoiceBuilder::new(Currency::Bitcoin)
                        .payment_hash(sha256::Hash::from_slice(&[0;32][..]).unwrap())
@@ -1818,7 +1818,7 @@ mod test {
                use ::*;
                use lightning::routing::router::RouteHintHop;
                use secp256k1::Secp256k1;
-               use secp256k1::key::{SecretKey, PublicKey};
+               use secp256k1::{SecretKey, PublicKey};
                use std::time::{UNIX_EPOCH, Duration};
 
                let secp_ctx = Secp256k1::new();
@@ -1897,7 +1897,7 @@ mod test {
                        .basic_mpp();
 
                let invoice = builder.clone().build_signed(|hash| {
-                       secp_ctx.sign_recoverable(hash, &private_key)
+                       secp_ctx.sign_ecdsa_recoverable(hash, &private_key)
                }).unwrap();
 
                assert!(invoice.check_signature().is_ok());
@@ -1932,7 +1932,7 @@ mod test {
        fn test_default_values() {
                use ::*;
                use secp256k1::Secp256k1;
-               use secp256k1::key::SecretKey;
+               use secp256k1::SecretKey;
 
                let signed_invoice = InvoiceBuilder::new(Currency::Bitcoin)
                        .description("Test".into())
@@ -1944,7 +1944,7 @@ mod test {
                        .sign::<_, ()>(|hash| {
                                let privkey = SecretKey::from_slice(&[41; 32]).unwrap();
                                let secp_ctx = Secp256k1::new();
-                               Ok(secp_ctx.sign_recoverable(hash, &privkey))
+                               Ok(secp_ctx.sign_ecdsa_recoverable(hash, &privkey))
                        })
                        .unwrap();
                let invoice = Invoice::from_signed(signed_invoice).unwrap();
@@ -1958,7 +1958,7 @@ mod test {
        fn test_expiration() {
                use ::*;
                use secp256k1::Secp256k1;
-               use secp256k1::key::SecretKey;
+               use secp256k1::SecretKey;
 
                let signed_invoice = InvoiceBuilder::new(Currency::Bitcoin)
                        .description("Test".into())
@@ -1970,7 +1970,7 @@ mod test {
                        .sign::<_, ()>(|hash| {
                                let privkey = SecretKey::from_slice(&[41; 32]).unwrap();
                                let secp_ctx = Secp256k1::new();
-                               Ok(secp_ctx.sign_recoverable(hash, &privkey))
+                               Ok(secp_ctx.sign_ecdsa_recoverable(hash, &privkey))
                        })
                        .unwrap();
                let invoice = Invoice::from_signed(signed_invoice).unwrap();
diff --git a/lightning-invoice/src/payment.rs b/lightning-invoice/src/payment.rs
index 82c07199f0db871c1498fb64a21e63603aec2084..6b79a0123d9e872cc3dcebdea7905c5b6d8613b2 100644
@@ -46,7 +46,7 @@
 //! # use lightning::util::ser::{Writeable, Writer};
 //! # use lightning_invoice::Invoice;
 //! # use lightning_invoice::payment::{InvoicePayer, Payer, RetryAttempts, Router};
-//! # use secp256k1::key::PublicKey;
+//! # use secp256k1::PublicKey;
 //! # use std::cell::RefCell;
 //! # use std::ops::Deref;
 //! #
@@ -148,7 +148,7 @@ use lightning::util::events::{Event, EventHandler};
 use lightning::util::logger::Logger;
 use crate::sync::Mutex;
 
-use secp256k1::key::PublicKey;
+use secp256k1::PublicKey;
 
 use core::ops::Deref;
 use core::time::Duration;
@@ -555,7 +555,7 @@ mod tests {
                        .min_final_cltv_expiry(144)
                        .amount_milli_satoshis(128)
                        .build_signed(|hash| {
-                               Secp256k1::new().sign_recoverable(hash, &private_key)
+                               Secp256k1::new().sign_ecdsa_recoverable(hash, &private_key)
                        })
                        .unwrap()
        }
@@ -580,7 +580,7 @@ mod tests {
                        .duration_since_epoch(duration_since_epoch())
                        .min_final_cltv_expiry(144)
                        .build_signed(|hash| {
-                               Secp256k1::new().sign_recoverable(hash, &private_key)
+                               Secp256k1::new().sign_ecdsa_recoverable(hash, &private_key)
                        })
                        .unwrap()
        }
@@ -600,7 +600,7 @@ mod tests {
                        .min_final_cltv_expiry(144)
                        .amount_milli_satoshis(128)
                        .build_signed(|hash| {
-                               Secp256k1::new().sign_recoverable(hash, &private_key)
+                               Secp256k1::new().sign_ecdsa_recoverable(hash, &private_key)
                        })
                        .unwrap()
        }
@@ -1558,7 +1558,7 @@ mod tests {
 
                assert!(invoice_payer.pay_invoice(&create_invoice_from_channelmanager_and_duration_since_epoch(
                        &nodes[1].node, nodes[1].keys_manager, Currency::Bitcoin, Some(100_010_000), "Invoice".to_string(),
-                       duration_since_epoch()).unwrap())
+                       duration_since_epoch(), 3600).unwrap())
                        .is_ok());
                let htlc_msgs = nodes[0].node.get_and_clear_pending_msg_events();
                assert_eq!(htlc_msgs.len(), 2);
@@ -1604,7 +1604,7 @@ mod tests {
 
                assert!(invoice_payer.pay_invoice(&create_invoice_from_channelmanager_and_duration_since_epoch(
                        &nodes[1].node, nodes[1].keys_manager, Currency::Bitcoin, Some(100_010_000), "Invoice".to_string(),
-                       duration_since_epoch()).unwrap())
+                       duration_since_epoch(), 3600).unwrap())
                        .is_ok());
                let htlc_msgs = nodes[0].node.get_and_clear_pending_msg_events();
                assert_eq!(htlc_msgs.len(), 2);
@@ -1686,7 +1686,7 @@ mod tests {
 
                assert!(invoice_payer.pay_invoice(&create_invoice_from_channelmanager_and_duration_since_epoch(
                        &nodes[1].node, nodes[1].keys_manager, Currency::Bitcoin, Some(100_010_000), "Invoice".to_string(),
-                       duration_since_epoch()).unwrap())
+                       duration_since_epoch(), 3600).unwrap())
                        .is_ok());
                let htlc_updates = SendEvent::from_node(&nodes[0]);
                check_added_monitors!(nodes[0], 1);
diff --git a/lightning-invoice/src/utils.rs b/lightning-invoice/src/utils.rs
index a2edc43afc7a17b02f1b7eb13906e9d7837f874e..9b3b41afbedae5f938c01968f221a843870d376c 100644
@@ -1,6 +1,6 @@
 //! Convenient utilities to create an invoice.
 
-use {CreationError, Currency, DEFAULT_EXPIRY_TIME, Invoice, InvoiceBuilder, SignOrCreationError};
+use {CreationError, Currency, Invoice, InvoiceBuilder, SignOrCreationError};
 use payment::{Payer, Router};
 
 use crate::{prelude::*, Description, InvoiceDescription, Sha256};
@@ -19,8 +19,7 @@ use lightning::routing::scoring::Score;
 use lightning::routing::network_graph::{NetworkGraph, RoutingFees};
 use lightning::routing::router::{Route, RouteHint, RouteHintHop, RouteParameters, find_route};
 use lightning::util::logger::Logger;
-use secp256k1::key::PublicKey;
-use core::convert::TryInto;
+use secp256k1::PublicKey;
 use core::ops::Deref;
 use core::time::Duration;
 use sync::Mutex;
@@ -162,7 +161,8 @@ fn _create_phantom_invoice<Signer: Sign, K: Deref>(
                .current_timestamp()
                .payment_hash(Hash::from_slice(&payment_hash.0).unwrap())
                .payment_secret(payment_secret)
-               .min_final_cltv_expiry(MIN_FINAL_CLTV_EXPIRY.into());
+               .min_final_cltv_expiry(MIN_FINAL_CLTV_EXPIRY.into())
+               .expiry_time(Duration::from_secs(invoice_expiry_delta_secs.into()));
        if let Some(amt) = amt_msat {
                invoice = invoice.amount_milli_satoshis(amt);
        }
@@ -212,9 +212,12 @@ fn _create_phantom_invoice<Signer: Sign, K: Deref>(
 /// method stores the invoice's payment secret and preimage in `ChannelManager`, so (a) the user
 /// doesn't have to store preimage/payment secret information and (b) `ChannelManager` can verify
 /// that the payment secret is valid when the invoice is paid.
+///
+/// `invoice_expiry_delta_secs` describes the number of seconds that the invoice is valid for
+/// in excess of the current time.
 pub fn create_invoice_from_channelmanager<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>(
        channelmanager: &ChannelManager<Signer, M, T, K, F, L>, keys_manager: K, network: Currency,
-       amt_msat: Option<u64>, description: String
+       amt_msat: Option<u64>, description: String, invoice_expiry_delta_secs: u32
 ) -> Result<Invoice, SignOrCreationError<()>>
 where
        M::Target: chain::Watch<Signer>,
@@ -227,7 +230,8 @@ where
        let duration = SystemTime::now().duration_since(SystemTime::UNIX_EPOCH)
                .expect("for the foreseeable future this shouldn't happen");
        create_invoice_from_channelmanager_and_duration_since_epoch(
-               channelmanager, keys_manager, network, amt_msat, description, duration
+               channelmanager, keys_manager, network, amt_msat, description, duration,
+               invoice_expiry_delta_secs
        )
 }
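With the new `invoice_expiry_delta_secs` parameter, invoice creation calls now set an explicit expiry relative to the current time. A hypothetical call under the updated signature shown above (the `channel_manager` and `keys_manager` bindings are assumed):

```rust
// Sketch only: the final argument is the new invoice_expiry_delta_secs.
let invoice = create_invoice_from_channelmanager(
    &channel_manager, keys_manager, Currency::Bitcoin,
    Some(10_000), "Coffee".to_string(),
    3600, // valid for one hour past the current time
).expect("invoice creation");
```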
 
@@ -238,9 +242,12 @@ where
 /// doesn't have to store preimage/payment secret information and (b) `ChannelManager` can verify
 /// that the payment secret is valid when the invoice is paid.
 /// Use this variant if you want to pass the `description_hash` to the invoice.
+///
+/// `invoice_expiry_delta_secs` describes the number of seconds that the invoice is valid for
+/// in excess of the current time.
 pub fn create_invoice_from_channelmanager_with_description_hash<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>(
        channelmanager: &ChannelManager<Signer, M, T, K, F, L>, keys_manager: K, network: Currency,
-       amt_msat: Option<u64>, description_hash: Sha256,
+       amt_msat: Option<u64>, description_hash: Sha256, invoice_expiry_delta_secs: u32
 ) -> Result<Invoice, SignOrCreationError<()>>
 where
        M::Target: chain::Watch<Signer>,
@@ -256,7 +263,8 @@ where
                .expect("for the foreseeable future this shouldn't happen");
 
        create_invoice_from_channelmanager_with_description_hash_and_duration_since_epoch(
-               channelmanager, keys_manager, network, amt_msat, description_hash, duration,
+               channelmanager, keys_manager, network, amt_msat,
+               description_hash, duration, invoice_expiry_delta_secs
        )
 }
 
@@ -266,6 +274,7 @@ where
 pub fn create_invoice_from_channelmanager_with_description_hash_and_duration_since_epoch<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>(
        channelmanager: &ChannelManager<Signer, M, T, K, F, L>, keys_manager: K, network: Currency,
        amt_msat: Option<u64>, description_hash: Sha256, duration_since_epoch: Duration,
+       invoice_expiry_delta_secs: u32
 ) -> Result<Invoice, SignOrCreationError<()>>
 where
        M::Target: chain::Watch<Signer>,
@@ -277,7 +286,7 @@ where
        _create_invoice_from_channelmanager_and_duration_since_epoch(
                channelmanager, keys_manager, network, amt_msat,
                InvoiceDescription::Hash(&description_hash),
-               duration_since_epoch,
+               duration_since_epoch, invoice_expiry_delta_secs
        )
 }
 
@@ -287,6 +296,7 @@ where
 pub fn create_invoice_from_channelmanager_and_duration_since_epoch<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>(
        channelmanager: &ChannelManager<Signer, M, T, K, F, L>, keys_manager: K, network: Currency,
        amt_msat: Option<u64>, description: String, duration_since_epoch: Duration,
+       invoice_expiry_delta_secs: u32
 ) -> Result<Invoice, SignOrCreationError<()>>
 where
        M::Target: chain::Watch<Signer>,
@@ -300,13 +310,14 @@ where
                InvoiceDescription::Direct(
                        &Description::new(description).map_err(SignOrCreationError::CreationError)?,
                ),
-               duration_since_epoch,
+               duration_since_epoch, invoice_expiry_delta_secs
        )
 }
 
 fn _create_invoice_from_channelmanager_and_duration_since_epoch<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>(
        channelmanager: &ChannelManager<Signer, M, T, K, F, L>, keys_manager: K, network: Currency,
        amt_msat: Option<u64>, description: InvoiceDescription, duration_since_epoch: Duration,
+       invoice_expiry_delta_secs: u32
 ) -> Result<Invoice, SignOrCreationError<()>>
 where
        M::Target: chain::Watch<Signer>,
@@ -320,7 +331,7 @@ where
        // `create_inbound_payment` only returns an error if the amount is greater than the total bitcoin
        // supply.
        let (payment_hash, payment_secret) = channelmanager
-               .create_inbound_payment(amt_msat, DEFAULT_EXPIRY_TIME.try_into().unwrap())
+               .create_inbound_payment(amt_msat, invoice_expiry_delta_secs)
                .map_err(|()| SignOrCreationError::CreationError(CreationError::InvalidAmount))?;
        let our_node_pubkey = channelmanager.get_our_node_id();
 
@@ -337,7 +348,8 @@ where
                .payment_hash(Hash::from_slice(&payment_hash.0).unwrap())
                .payment_secret(payment_secret)
                .basic_mpp()
-               .min_final_cltv_expiry(MIN_FINAL_CLTV_EXPIRY.into());
+               .min_final_cltv_expiry(MIN_FINAL_CLTV_EXPIRY.into())
+               .expiry_time(Duration::from_secs(invoice_expiry_delta_secs.into()));
        if let Some(amt) = amt_msat {
                invoice = invoice.amount_milli_satoshis(amt);
        }
@@ -526,12 +538,14 @@ mod test {
                let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
                let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
                create_unannounced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001, InitFeatures::known(), InitFeatures::known());
+               let non_default_invoice_expiry_secs = 4200;
                let invoice = create_invoice_from_channelmanager_and_duration_since_epoch(
                        &nodes[1].node, nodes[1].keys_manager, Currency::BitcoinTestnet, Some(10_000), "test".to_string(),
-                       Duration::from_secs(1234567)).unwrap();
+                       Duration::from_secs(1234567), non_default_invoice_expiry_secs).unwrap();
                assert_eq!(invoice.amount_pico_btc(), Some(100_000));
                assert_eq!(invoice.min_final_cltv_expiry(), MIN_FINAL_CLTV_EXPIRY as u64);
                assert_eq!(invoice.description(), InvoiceDescription::Direct(&Description("test".to_string())));
+               assert_eq!(invoice.expiry_time(), Duration::from_secs(non_default_invoice_expiry_secs.into()));
 
                // Invoice SCIDs should always use inbound SCID aliases over the real channel ID, if one is
                // available.
@@ -592,7 +606,7 @@ mod test {
                let description_hash = crate::Sha256(Hash::hash("Testing description_hash".as_bytes()));
                let invoice = ::utils::create_invoice_from_channelmanager_with_description_hash_and_duration_since_epoch(
                        &nodes[1].node, nodes[1].keys_manager, Currency::BitcoinTestnet, Some(10_000),
-                       description_hash, Duration::from_secs(1234567),
+                       description_hash, Duration::from_secs(1234567), 3600
                ).unwrap();
                assert_eq!(invoice.amount_pico_btc(), Some(100_000));
                assert_eq!(invoice.min_final_cltv_expiry(), MIN_FINAL_CLTV_EXPIRY as u64);
@@ -752,7 +766,7 @@ mod test {
        ) {
                let invoice = create_invoice_from_channelmanager_and_duration_since_epoch(
                        &invoice_node.node, invoice_node.keys_manager, Currency::BitcoinTestnet, invoice_amt, "test".to_string(),
-                       Duration::from_secs(1234567)).unwrap();
+                       Duration::from_secs(1234567), 3600).unwrap();
                let hints = invoice.private_routes();
 
                for hint in hints {
@@ -799,8 +813,13 @@ mod test {
                } else {
                        None
                };
+               let non_default_invoice_expiry_secs = 4200;
 
-               let invoice = ::utils::create_phantom_invoice::<EnforcingSigner, &test_utils::TestKeysInterface>(Some(payment_amt), payment_hash, "test".to_string(), 3600, route_hints, &nodes[1].keys_manager, Currency::BitcoinTestnet).unwrap();
+               let invoice =
+                       ::utils::create_phantom_invoice::<EnforcingSigner, &test_utils::TestKeysInterface>(
+                               Some(payment_amt), payment_hash, "test".to_string(), non_default_invoice_expiry_secs,
+                               route_hints, &nodes[1].keys_manager, Currency::BitcoinTestnet
+                       ).unwrap();
                let (payment_hash, payment_secret) = (PaymentHash(invoice.payment_hash().into_inner()), *invoice.payment_secret());
                let payment_preimage = if user_generated_pmt_hash {
                        user_payment_preimage
@@ -811,6 +830,7 @@ mod test {
                assert_eq!(invoice.min_final_cltv_expiry(), MIN_FINAL_CLTV_EXPIRY as u64);
                assert_eq!(invoice.description(), InvoiceDescription::Direct(&Description("test".to_string())));
                assert_eq!(invoice.route_hints().len(), 2);
+               assert_eq!(invoice.expiry_time(), Duration::from_secs(non_default_invoice_expiry_secs.into()));
                assert!(!invoice.features().unwrap().supports_basic_mpp());
 
                let payment_params = PaymentParameters::from_node_id(invoice.recover_payee_pub_key())
@@ -931,10 +951,17 @@ mod test {
                ];
 
                let description_hash = crate::Sha256(Hash::hash("Description hash phantom invoice".as_bytes()));
-               let invoice = ::utils::create_phantom_invoice_with_description_hash::<EnforcingSigner,&test_utils::TestKeysInterface>(Some(payment_amt), None, 3600, description_hash, route_hints, &nodes[1].keys_manager, Currency::BitcoinTestnet).unwrap();
-
+               let non_default_invoice_expiry_secs = 4200;
+               let invoice = ::utils::create_phantom_invoice_with_description_hash::<
+                       EnforcingSigner, &test_utils::TestKeysInterface,
+               >(
+                       Some(payment_amt), None, non_default_invoice_expiry_secs, description_hash,
+                       route_hints, &nodes[1].keys_manager, Currency::BitcoinTestnet
+               )
+               .unwrap();
                assert_eq!(invoice.amount_pico_btc(), Some(200_000));
                assert_eq!(invoice.min_final_cltv_expiry(), MIN_FINAL_CLTV_EXPIRY as u64);
+               assert_eq!(invoice.expiry_time(), Duration::from_secs(non_default_invoice_expiry_secs.into()));
                assert_eq!(invoice.description(), InvoiceDescription::Hash(&crate::Sha256(Sha256::hash("Description hash phantom invoice".as_bytes()))));
        }
 
index 1eaeb31378513065e74a7c4958e9b8e0c8befa8e..1d9c481513dd98944514fd25ea2f7601b2e935a2 100644 (file)
@@ -13,7 +13,7 @@ use lightning::routing::router::{RouteHint, RouteHintHop};
 use lightning::routing::network_graph::RoutingFees;
 use lightning_invoice::*;
 use secp256k1::PublicKey;
-use secp256k1::recovery::{RecoverableSignature, RecoveryId};
+use secp256k1::ecdsa::{RecoverableSignature, RecoveryId};
 use std::collections::HashSet;
 use std::time::Duration;
 use std::str::FromStr;
index 40734ff28c3ab3088d3276f3056d01c9d7bf83d9..08c649f79bc1f885e3dabe06a17c7b250733cdb7 100644 (file)
@@ -15,7 +15,7 @@ all-features = true
 rustdoc-args = ["--cfg", "docsrs"]
 
 [dependencies]
-bitcoin = "0.27"
+bitcoin = "0.28.1"
 lightning = { version = "0.0.106", path = "../lightning" }
 tokio = { version = "1.0", features = [ "io-util", "macros", "rt", "sync", "net", "time" ] }
 
index a9fd861bc846e440fd8ff54ab31deedf622aa8e6..f7e42b6634147d405e2cfebe1278801940e0a771 100644 (file)
@@ -23,7 +23,7 @@
 //! # Example
 //! ```
 //! use std::net::TcpStream;
-//! use bitcoin::secp256k1::key::PublicKey;
+//! use bitcoin::secp256k1::PublicKey;
 //! use lightning::util::events::{Event, EventHandler, EventsProvider};
 //! use std::net::SocketAddr;
 //! use std::sync::Arc;
@@ -71,7 +71,7 @@
 
 #![cfg_attr(docsrs, feature(doc_auto_cfg))]
 
-use bitcoin::secp256k1::key::PublicKey;
+use bitcoin::secp256k1::PublicKey;
 
 use tokio::net::TcpStream;
 use tokio::{io, time};
@@ -85,7 +85,6 @@ use lightning::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, NetAddre
 use lightning::util::logger::Logger;
 
 use std::task;
-use std::net::IpAddr;
 use std::net::SocketAddr;
 use std::net::TcpStream as StdTcpStream;
 use std::sync::{Arc, Mutex};
@@ -121,11 +120,28 @@ struct Connection {
        id: u64,
 }
 impl Connection {
+       async fn poll_event_process<CMH, RMH, L, UMH>(peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor, Arc<CMH>, Arc<RMH>, Arc<L>, Arc<UMH>>>, mut event_receiver: mpsc::Receiver<()>) where
+                       CMH: ChannelMessageHandler + 'static + Send + Sync,
+                       RMH: RoutingMessageHandler + 'static + Send + Sync,
+                       L: Logger + 'static + ?Sized + Send + Sync,
+                       UMH: CustomMessageHandler + 'static + Send + Sync {
+               loop {
+                       if event_receiver.recv().await.is_none() {
+                               return;
+                       }
+                       peer_manager.process_events();
+               }
+       }
+
        async fn schedule_read<CMH, RMH, L, UMH>(peer_manager: Arc<peer_handler::PeerManager<SocketDescriptor, Arc<CMH>, Arc<RMH>, Arc<L>, Arc<UMH>>>, us: Arc<Mutex<Self>>, mut reader: io::ReadHalf<TcpStream>, mut read_wake_receiver: mpsc::Receiver<()>, mut write_avail_receiver: mpsc::Receiver<()>) where
-                       CMH: ChannelMessageHandler + 'static,
-                       RMH: RoutingMessageHandler + 'static,
-                       L: Logger + 'static + ?Sized,
-                       UMH: CustomMessageHandler + 'static {
+                       CMH: ChannelMessageHandler + 'static + Send + Sync,
+                       RMH: RoutingMessageHandler + 'static + Send + Sync,
+                       L: Logger + 'static + ?Sized + Send + Sync,
+                       UMH: CustomMessageHandler + 'static + Send + Sync {
+               // Create a waker to wake up poll_event_process, above
+               let (event_waker, event_receiver) = mpsc::channel(1);
+               tokio::spawn(Self::poll_event_process(Arc::clone(&peer_manager), event_receiver));
+
                // 8KB is nice and big but also should never cause any issues with stack overflowing.
                let mut buf = [0; 8192];
 
@@ -176,7 +192,14 @@ impl Connection {
                                        Err(_) => break Disconnect::PeerDisconnected,
                                },
                        }
-                       peer_manager.process_events();
+                       let _ = event_waker.try_send(());
+
+                       // At this point we've processed a message or two, and reset the ping timer for this
+                       // peer, at least in the "are we still receiving messages" context. If we don't give up
+                       // our timeslice to another task, we may just spin on this peer, starving other peers
+                       // and eventually disconnecting them for ping timeouts. Instead, we explicitly yield
+                       // here.
+                       tokio::task::yield_now().await;
                };
                let writer_option = us.lock().unwrap().writer.take();
                if let Some(mut writer) = writer_option {
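
The plumbing above boils down to a small notify-and-drain pattern: the read loop nudges a capacity-one channel and yields its timeslice, while a separate task drains the wake-ups and calls `process_events()`. A self-contained sketch of just that pattern in plain Tokio (no lightning types):

    use tokio::sync::mpsc;

    #[tokio::main]
    async fn main() {
        // Capacity-one channel: try_send() queues at most one pending wake-up and
        // fails harmlessly if one is already queued.
        let (event_waker, mut event_receiver) = mpsc::channel::<()>(1);

        let processor = tokio::spawn(async move {
            while event_receiver.recv().await.is_some() {
                // peer_manager.process_events() runs here in the real code.
            }
        });

        let _ = event_waker.try_send(()); // cheap, never blocks the read loop
        tokio::task::yield_now().await;   // give other tasks (and the processor) a turn

        drop(event_waker); // closing the channel lets the processor task finish
        processor.await.unwrap();
    }
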
@@ -212,6 +235,20 @@ impl Connection {
        }
 }
 
+fn get_addr_from_stream(stream: &StdTcpStream) -> Option<NetAddress> {
+       match stream.peer_addr() {
+               Ok(SocketAddr::V4(sockaddr)) => Some(NetAddress::IPv4 {
+                       addr: sockaddr.ip().octets(),
+                       port: sockaddr.port(),
+               }),
+               Ok(SocketAddr::V6(sockaddr)) => Some(NetAddress::IPv6 {
+                       addr: sockaddr.ip().octets(),
+                       port: sockaddr.port(),
+               }),
+               Err(_) => None,
+       }
+}
+
 /// Process incoming messages and feed outgoing messages on the provided socket generated by
 /// accepting an incoming connection.
 ///
@@ -223,21 +260,12 @@ pub fn setup_inbound<CMH, RMH, L, UMH>(peer_manager: Arc<peer_handler::PeerManag
                RMH: RoutingMessageHandler + 'static + Send + Sync,
                L: Logger + 'static + ?Sized + Send + Sync,
                UMH: CustomMessageHandler + 'static + Send + Sync {
-       let ip_addr = stream.peer_addr().unwrap();
+       let remote_addr = get_addr_from_stream(&stream);
        let (reader, write_receiver, read_receiver, us) = Connection::new(stream);
        #[cfg(debug_assertions)]
        let last_us = Arc::clone(&us);
 
-       let handle_opt = if let Ok(_) = peer_manager.new_inbound_connection(SocketDescriptor::new(us.clone()), match ip_addr.ip() {
-               IpAddr::V4(ip) => Some(NetAddress::IPv4 {
-                       addr: ip.octets(),
-                       port: ip_addr.port(),
-               }),
-               IpAddr::V6(ip) => Some(NetAddress::IPv6 {
-                       addr: ip.octets(),
-                       port: ip_addr.port(),
-               }),
-       }) {
+       let handle_opt = if let Ok(_) = peer_manager.new_inbound_connection(SocketDescriptor::new(us.clone()), remote_addr) {
                Some(tokio::spawn(Connection::schedule_read(peer_manager, us, reader, read_receiver, write_receiver)))
        } else {
                // Note that we will skip socket_disconnected here, in accordance with the PeerManager
@@ -274,20 +302,11 @@ pub fn setup_outbound<CMH, RMH, L, UMH>(peer_manager: Arc<peer_handler::PeerMana
                RMH: RoutingMessageHandler + 'static + Send + Sync,
                L: Logger + 'static + ?Sized + Send + Sync,
                UMH: CustomMessageHandler + 'static + Send + Sync {
-       let ip_addr = stream.peer_addr().unwrap();
+       let remote_addr = get_addr_from_stream(&stream);
        let (reader, mut write_receiver, read_receiver, us) = Connection::new(stream);
        #[cfg(debug_assertions)]
        let last_us = Arc::clone(&us);
-       let handle_opt = if let Ok(initial_send) = peer_manager.new_outbound_connection(their_node_id, SocketDescriptor::new(us.clone()), match ip_addr.ip() {
-               IpAddr::V4(ip) => Some(NetAddress::IPv4 {
-                       addr: ip.octets(),
-                       port: ip_addr.port(),
-               }),
-               IpAddr::V6(ip) => Some(NetAddress::IPv6 {
-                       addr: ip.octets(),
-                       port: ip_addr.port(),
-               }),
-       }) {
+       let handle_opt = if let Ok(initial_send) = peer_manager.new_outbound_connection(their_node_id, SocketDescriptor::new(us.clone()), remote_addr) {
                Some(tokio::spawn(async move {
                        // We should essentially always have enough room in a TCP socket buffer to send the
                        // initial 10s of bytes. However, tokio running in single-threaded mode will always
@@ -448,6 +467,9 @@ impl peer_handler::SocketDescriptor for SocketDescriptor {
                                        // pause read given we're now waiting on the remote end to ACK (and in
                                        // accordance with the send_data() docs).
                                        us.read_paused = true;
+                                       // Further, to avoid any currently pending read causing a `read_event` call, wake
+                                       // up the read_waker and restart its loop.
+                                       let _ = us.read_waker.try_send(());
                                        return written_len;
                                },
                        }
@@ -561,6 +583,22 @@ mod tests {
                }
        }
 
+       fn make_tcp_connection() -> (std::net::TcpStream, std::net::TcpStream) {
+               if let Ok(listener) = std::net::TcpListener::bind("127.0.0.1:9735") {
+                       (std::net::TcpStream::connect("127.0.0.1:9735").unwrap(), listener.accept().unwrap().0)
+               } else if let Ok(listener) = std::net::TcpListener::bind("127.0.0.1:19735") {
+                       (std::net::TcpStream::connect("127.0.0.1:19735").unwrap(), listener.accept().unwrap().0)
+               } else if let Ok(listener) = std::net::TcpListener::bind("127.0.0.1:9997") {
+                       (std::net::TcpStream::connect("127.0.0.1:9997").unwrap(), listener.accept().unwrap().0)
+               } else if let Ok(listener) = std::net::TcpListener::bind("127.0.0.1:9998") {
+                       (std::net::TcpStream::connect("127.0.0.1:9998").unwrap(), listener.accept().unwrap().0)
+               } else if let Ok(listener) = std::net::TcpListener::bind("127.0.0.1:9999") {
+                       (std::net::TcpStream::connect("127.0.0.1:9999").unwrap(), listener.accept().unwrap().0)
+               } else if let Ok(listener) = std::net::TcpListener::bind("127.0.0.1:46926") {
+                       (std::net::TcpStream::connect("127.0.0.1:46926").unwrap(), listener.accept().unwrap().0)
+               } else { panic!("Failed to bind to v4 localhost on common ports"); }
+       }
+
        async fn do_basic_connection_test() {
                let secp_ctx = Secp256k1::new();
                let a_key = SecretKey::from_slice(&[1; 32]).unwrap();
@@ -600,13 +638,7 @@ mod tests {
                // address. This may not always be the case in containers and the like, so if this test is
                // failing for you check that you have a loopback interface and it is configured with
                // 127.0.0.1.
-               let (conn_a, conn_b) = if let Ok(listener) = std::net::TcpListener::bind("127.0.0.1:9735") {
-                       (std::net::TcpStream::connect("127.0.0.1:9735").unwrap(), listener.accept().unwrap().0)
-               } else if let Ok(listener) = std::net::TcpListener::bind("127.0.0.1:9999") {
-                       (std::net::TcpStream::connect("127.0.0.1:9999").unwrap(), listener.accept().unwrap().0)
-               } else if let Ok(listener) = std::net::TcpListener::bind("127.0.0.1:46926") {
-                       (std::net::TcpStream::connect("127.0.0.1:46926").unwrap(), listener.accept().unwrap().0)
-               } else { panic!("Failed to bind to v4 localhost on common ports"); };
+               let (conn_a, conn_b) = make_tcp_connection();
 
                let fut_a = super::setup_outbound(Arc::clone(&a_manager), b_pub, conn_a);
                let fut_b = super::setup_inbound(b_manager, conn_b);
@@ -634,8 +666,53 @@ mod tests {
        async fn basic_threaded_connection_test() {
                do_basic_connection_test().await;
        }
+
        #[tokio::test]
        async fn basic_unthreaded_connection_test() {
                do_basic_connection_test().await;
        }
+
+       async fn race_disconnect_accept() {
+               // Previously, if we handed an already-disconnected socket to `setup_inbound` we'd panic.
+               // This attempts to find other similar races by opening connections and shutting them down
+               // while connecting. Sadly in testing this did *not* reproduce the previous issue.
+               let secp_ctx = Secp256k1::new();
+               let a_key = SecretKey::from_slice(&[1; 32]).unwrap();
+               let b_key = SecretKey::from_slice(&[2; 32]).unwrap();
+               let b_pub = PublicKey::from_secret_key(&secp_ctx, &b_key);
+
+               let a_manager = Arc::new(PeerManager::new(MessageHandler {
+                       chan_handler: Arc::new(lightning::ln::peer_handler::ErroringMessageHandler::new()),
+                       route_handler: Arc::new(lightning::ln::peer_handler::IgnoringMessageHandler{}),
+               }, a_key, &[1; 32], Arc::new(TestLogger()), Arc::new(lightning::ln::peer_handler::IgnoringMessageHandler{})));
+
+               // Make two connections, one for an inbound and one for an outbound connection
+               let conn_a = {
+                       let (conn_a, _) = make_tcp_connection();
+                       conn_a
+               };
+               let conn_b = {
+                       let (_, conn_b) = make_tcp_connection();
+                       conn_b
+               };
+
+               // Call connection setup inside new tokio tasks.
+               let manager_reference = Arc::clone(&a_manager);
+               tokio::spawn(async move {
+                       super::setup_inbound(manager_reference, conn_a).await
+               });
+               tokio::spawn(async move {
+                       super::setup_outbound(a_manager, b_pub, conn_b).await
+               });
+       }
+
+       #[tokio::test(flavor = "multi_thread")]
+       async fn threaded_race_disconnect_accept() {
+               race_disconnect_accept().await;
+       }
+
+       #[tokio::test]
+       async fn unthreaded_race_disconnect_accept() {
+               race_disconnect_accept().await;
+       }
 }
index d97cd017b2b1856c30b3cb2ea6038c334851a314..1bf4c5d2e75f87a1b759963b09adad28cadb838e 100644 (file)
@@ -16,7 +16,7 @@ rustdoc-args = ["--cfg", "docsrs"]
 _bench_unstable = ["lightning/_bench_unstable"]
 
 [dependencies]
-bitcoin = "0.27"
+bitcoin = "0.28.1"
 lightning = { version = "0.0.106", path = "../lightning" }
 libc = "0.2"
 
index 25bd00f5e9539ee8b486b79cb7436278015a5698..4adbb33e5b26fd886fb64d05d3d315efd3f28b16 100644 (file)
@@ -84,7 +84,6 @@ mod tests {
        use super::{write_to_file};
        use std::fs;
        use std::io;
-       use std::io::Write;
        use std::path::PathBuf;
 
        struct TestWriteable{}
index a9df766731b6c59d1bc02b9e9299fe4dc2964a35..5a54d042a32c55654d2be0e8fe8bc3c65113a237 100644 (file)
@@ -38,9 +38,7 @@ grind_signatures = []
 default = ["std", "grind_signatures"]
 
 [dependencies]
-bitcoin = { version = "0.27", default-features = false, features = ["secp-recovery"] }
-# TODO remove this once rust-bitcoin PR #637 is released
-secp256k1 = { version = "0.20.2", default-features = false, features = ["alloc"] }
+bitcoin = { version = "0.28.1", default-features = false, features = ["secp-recovery"] }
 
 hashbrown = { version = "0.11", optional = true }
 hex = { version = "0.4", optional = true }
@@ -52,10 +50,8 @@ core2 = { version = "0.3.0", optional = true, default-features = false }
 [dev-dependencies]
 hex = "0.4"
 regex = "0.2.11"
-# TODO remove this once rust-bitcoin PR #637 is released
-secp256k1 = { version = "0.20.2", default-features = false, features = ["alloc"] }
 
 [dev-dependencies.bitcoin]
-version = "0.27"
+version = "0.28.1"
 default-features = false
 features = ["bitcoinconsensus", "secp-recovery"]
index e05c7e031214537b3f56ae3e56052c6800ed117d..503e6bdee0669551d1853932447a5db08fc92c17 100644 (file)
@@ -23,7 +23,7 @@
 //! events. The remote server would make use of [`ChainMonitor`] for block processing and for
 //! servicing [`ChannelMonitor`] updates from the client.
 
-use bitcoin::blockdata::block::{Block, BlockHeader};
+use bitcoin::blockdata::block::BlockHeader;
 use bitcoin::hash_types::Txid;
 
 use chain;
@@ -501,9 +501,7 @@ where
        L::Target: Logger,
        P::Target: Persist<ChannelSigner>,
 {
-       fn block_connected(&self, block: &Block, height: u32) {
-               let header = &block.header;
-               let txdata: Vec<_> = block.txdata.iter().enumerate().collect();
+       fn filtered_block_connected(&self, header: &BlockHeader, txdata: &TransactionData, height: u32) {
                log_debug!(self.logger, "New best block {} at height {} provided via block_connected", header.block_hash(), height);
                self.process_chain_data(header, Some(height), &txdata, |monitor, txdata| {
                        monitor.block_connected(
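
Callers that still hold a full block can keep the old behaviour by enumerating its transactions themselves; a minimal sketch, assuming the `chain::Listen` trait carries the `filtered_block_connected` method implemented above:

    use bitcoin::blockdata::block::Block;
    use lightning::chain::Listen;

    // Feed an unfiltered block through the filtered entry point.
    fn connect_full_block<L: Listen>(listener: &L, block: &Block, height: u32) {
        // Build the (index, &Transaction) list the new signature expects.
        let txdata: Vec<_> = block.txdata.iter().enumerate().collect();
        listener.filtered_block_connected(&block.header, &txdata, height);
    }
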
index fb2cbaddf533a9aa56cbc8586ba7f68f997345f7..738fff3837ca045f5c8c9e3e93e588bb88974191 100644 (file)
@@ -20,7 +20,7 @@
 //! security-domain-separated system design, you should consider having multiple paths for
 //! ChannelMonitors to get out of the HSM and onto monitoring devices.
 
-use bitcoin::blockdata::block::{Block, BlockHeader};
+use bitcoin::blockdata::block::BlockHeader;
 use bitcoin::blockdata::transaction::{TxOut,Transaction};
 use bitcoin::blockdata::script::{Script, Builder};
 use bitcoin::blockdata::opcodes;
@@ -29,8 +29,8 @@ use bitcoin::hashes::Hash;
 use bitcoin::hashes::sha256::Hash as Sha256;
 use bitcoin::hash_types::{Txid, BlockHash, WPubkeyHash};
 
-use bitcoin::secp256k1::{Secp256k1,Signature};
-use bitcoin::secp256k1::key::{SecretKey,PublicKey};
+use bitcoin::secp256k1::{Secp256k1, ecdsa::Signature};
+use bitcoin::secp256k1::{SecretKey, PublicKey};
 use bitcoin::secp256k1;
 
 use ln::{PaymentHash, PaymentPreimage};
@@ -2653,7 +2653,7 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
                                                        // appears to be spending the correct type (ie that the match would
                                                        // actually succeed in BIP 158/159-style filters).
                                                        if _script_pubkey.is_v0_p2wsh() {
-                                                               assert_eq!(&bitcoin::Address::p2wsh(&Script::from(input.witness.last().unwrap().clone()), bitcoin::Network::Bitcoin).script_pubkey(), _script_pubkey);
+                                                               assert_eq!(&bitcoin::Address::p2wsh(&Script::from(input.witness.last().unwrap().to_vec()), bitcoin::Network::Bitcoin).script_pubkey(), _script_pubkey);
                                                        } else if _script_pubkey.is_v0_p2wpkh() {
                                                                assert_eq!(&bitcoin::Address::p2wpkh(&bitcoin::PublicKey::from_slice(&input.witness.last().unwrap()).unwrap(), bitcoin::Network::Bitcoin).unwrap().script_pubkey(), _script_pubkey);
                                                        } else { panic!(); }
@@ -2736,20 +2736,23 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
        fn is_resolving_htlc_output<L: Deref>(&mut self, tx: &Transaction, height: u32, logger: &L) where L::Target: Logger {
                'outer_loop: for input in &tx.input {
                        let mut payment_data = None;
-                       let revocation_sig_claim = (input.witness.len() == 3 && HTLCType::scriptlen_to_htlctype(input.witness[2].len()) == Some(HTLCType::OfferedHTLC) && input.witness[1].len() == 33)
-                               || (input.witness.len() == 3 && HTLCType::scriptlen_to_htlctype(input.witness[2].len()) == Some(HTLCType::AcceptedHTLC) && input.witness[1].len() == 33);
-                       let accepted_preimage_claim = input.witness.len() == 5 && HTLCType::scriptlen_to_htlctype(input.witness[4].len()) == Some(HTLCType::AcceptedHTLC);
+                       let witness_items = input.witness.len();
+                       let htlctype = input.witness.last().map(|w| w.len()).and_then(HTLCType::scriptlen_to_htlctype);
+                       let prev_last_witness_len = input.witness.second_to_last().map(|w| w.len()).unwrap_or(0);
+                       let revocation_sig_claim = (witness_items == 3 && htlctype == Some(HTLCType::OfferedHTLC) && prev_last_witness_len == 33)
+                               || (witness_items == 3 && htlctype == Some(HTLCType::AcceptedHTLC) && prev_last_witness_len == 33);
+                       let accepted_preimage_claim = witness_items == 5 && htlctype == Some(HTLCType::AcceptedHTLC);
                        #[cfg(not(fuzzing))]
-                       let accepted_timeout_claim = input.witness.len() == 3 && HTLCType::scriptlen_to_htlctype(input.witness[2].len()) == Some(HTLCType::AcceptedHTLC) && !revocation_sig_claim;
-                       let offered_preimage_claim = input.witness.len() == 3 && HTLCType::scriptlen_to_htlctype(input.witness[2].len()) == Some(HTLCType::OfferedHTLC) && !revocation_sig_claim;
+                       let accepted_timeout_claim = witness_items == 3 && htlctype == Some(HTLCType::AcceptedHTLC) && !revocation_sig_claim;
+                       let offered_preimage_claim = witness_items == 3 && htlctype == Some(HTLCType::OfferedHTLC) && !revocation_sig_claim;
                        #[cfg(not(fuzzing))]
-                       let offered_timeout_claim = input.witness.len() == 5 && HTLCType::scriptlen_to_htlctype(input.witness[4].len()) == Some(HTLCType::OfferedHTLC);
+                       let offered_timeout_claim = witness_items == 5 && htlctype == Some(HTLCType::OfferedHTLC);
 
                        let mut payment_preimage = PaymentPreimage([0; 32]);
                        if accepted_preimage_claim {
-                               payment_preimage.0.copy_from_slice(&input.witness[3]);
+                               payment_preimage.0.copy_from_slice(input.witness.second_to_last().unwrap());
                        } else if offered_preimage_claim {
-                               payment_preimage.0.copy_from_slice(&input.witness[1]);
+                               payment_preimage.0.copy_from_slice(input.witness.second_to_last().unwrap());
                        }
 
                        macro_rules! log_claim {
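
The rewrite above reflects the opaque `Witness` type in bitcoin 0.28, where elements are reached via `last()`/`second_to_last()` rather than `Vec` indexing. A small sketch exercising only the accessors this change relies on:

    use bitcoin::Witness;

    fn witness_accessor_sketch() {
        let mut witness = Witness::new();
        witness.push(vec![0x30]);         // e.g. a signature element
        witness.push(vec![0x51, 0x52]);   // e.g. a (tiny) script element
        assert_eq!(witness.len(), 2);
        // The HTLC-claim classification above keys off the trailing elements:
        assert_eq!(witness.last().map(|w| w.len()), Some(2));
        assert_eq!(witness.second_to_last().map(|w| w.len()), Some(1));
        // Round-trip helpers used elsewhere in this change:
        let elements: Vec<Vec<u8>> = witness.to_vec();
        let _rebuilt = Witness::from_vec(elements);
    }
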
@@ -3007,9 +3010,8 @@ where
        F::Target: FeeEstimator,
        L::Target: Logger,
 {
-       fn block_connected(&self, block: &Block, height: u32) {
-               let txdata: Vec<_> = block.txdata.iter().enumerate().collect();
-               self.0.block_connected(&block.header, &txdata, height, &*self.1, &*self.2, &*self.3);
+       fn filtered_block_connected(&self, header: &BlockHeader, txdata: &TransactionData, height: u32) {
+               self.0.block_connected(header, txdata, height, &*self.1, &*self.2, &*self.3);
        }
 
        fn block_disconnected(&self, header: &BlockHeader, height: u32) {
@@ -3323,15 +3325,15 @@ mod tests {
        use bitcoin::blockdata::block::BlockHeader;
        use bitcoin::blockdata::script::{Script, Builder};
        use bitcoin::blockdata::opcodes;
-       use bitcoin::blockdata::transaction::{Transaction, TxIn, TxOut, SigHashType};
+       use bitcoin::blockdata::transaction::{Transaction, TxIn, TxOut, EcdsaSighashType};
        use bitcoin::blockdata::transaction::OutPoint as BitcoinOutPoint;
-       use bitcoin::util::bip143;
+       use bitcoin::util::sighash;
        use bitcoin::hashes::Hash;
        use bitcoin::hashes::sha256::Hash as Sha256;
        use bitcoin::hashes::hex::FromHex;
        use bitcoin::hash_types::{BlockHash, Txid};
        use bitcoin::network::constants::Network;
-       use bitcoin::secp256k1::key::{SecretKey,PublicKey};
+       use bitcoin::secp256k1::{SecretKey,PublicKey};
        use bitcoin::secp256k1::Secp256k1;
 
        use hex;
@@ -3356,6 +3358,7 @@ mod tests {
        use util::ser::{ReadableArgs, Writeable};
        use sync::{Arc, Mutex};
        use io;
+       use bitcoin::Witness;
        use prelude::*;
 
        fn do_test_funding_spend_refuses_updates(use_local_txn: bool) {
@@ -3609,24 +3612,27 @@ mod tests {
                                        transaction_output_index: Some($idx as u32),
                                };
                                let redeem_script = if *$weight == WEIGHT_REVOKED_OUTPUT { chan_utils::get_revokeable_redeemscript(&pubkey, 256, &pubkey) } else { chan_utils::get_htlc_redeemscript_with_explicit_keys(&htlc, $opt_anchors, &pubkey, &pubkey, &pubkey) };
-                               let sighash = hash_to_message!(&$sighash_parts.signature_hash($idx, &redeem_script, $amount, SigHashType::All)[..]);
-                               let sig = secp_ctx.sign(&sighash, &privkey);
-                               $sighash_parts.access_witness($idx).push(sig.serialize_der().to_vec());
-                               $sighash_parts.access_witness($idx)[0].push(SigHashType::All as u8);
-                               $sum_actual_sigs += $sighash_parts.access_witness($idx)[0].len();
+                               let sighash = hash_to_message!(&$sighash_parts.segwit_signature_hash($idx, &redeem_script, $amount, EcdsaSighashType::All).unwrap()[..]);
+                               let sig = secp_ctx.sign_ecdsa(&sighash, &privkey);
+                               let mut ser_sig = sig.serialize_der().to_vec();
+                               ser_sig.push(EcdsaSighashType::All as u8);
+                               $sum_actual_sigs += ser_sig.len();
+                               let witness = $sighash_parts.witness_mut($idx).unwrap();
+                               witness.push(ser_sig);
                                if *$weight == WEIGHT_REVOKED_OUTPUT {
-                                       $sighash_parts.access_witness($idx).push(vec!(1));
+                                       witness.push(vec!(1));
                                } else if *$weight == weight_revoked_offered_htlc($opt_anchors) || *$weight == weight_revoked_received_htlc($opt_anchors) {
-                                       $sighash_parts.access_witness($idx).push(pubkey.clone().serialize().to_vec());
+                                       witness.push(pubkey.clone().serialize().to_vec());
                                } else if *$weight == weight_received_htlc($opt_anchors) {
-                                       $sighash_parts.access_witness($idx).push(vec![0]);
+                                       witness.push(vec![0]);
                                } else {
-                                       $sighash_parts.access_witness($idx).push(PaymentPreimage([1; 32]).0.to_vec());
+                                       witness.push(PaymentPreimage([1; 32]).0.to_vec());
                                }
-                               $sighash_parts.access_witness($idx).push(redeem_script.into_bytes());
-                               println!("witness[0] {}", $sighash_parts.access_witness($idx)[0].len());
-                               println!("witness[1] {}", $sighash_parts.access_witness($idx)[1].len());
-                               println!("witness[2] {}", $sighash_parts.access_witness($idx)[2].len());
+                               witness.push(redeem_script.into_bytes());
+                               let witness = witness.to_vec();
+                               println!("witness[0] {}", witness[0].len());
+                               println!("witness[1] {}", witness[1].len());
+                               println!("witness[2] {}", witness[2].len());
                        }
                }
 
@@ -3645,24 +3651,24 @@ mod tests {
                                        },
                                        script_sig: Script::new(),
                                        sequence: 0xfffffffd,
-                                       witness: Vec::new(),
+                                       witness: Witness::new(),
                                });
                        }
                        claim_tx.output.push(TxOut {
                                script_pubkey: script_pubkey.clone(),
                                value: 0,
                        });
-                       let base_weight = claim_tx.get_weight();
+                       let base_weight = claim_tx.weight();
                        let inputs_weight = vec![WEIGHT_REVOKED_OUTPUT, weight_revoked_offered_htlc(opt_anchors), weight_revoked_offered_htlc(opt_anchors), weight_revoked_received_htlc(opt_anchors)];
                        let mut inputs_total_weight = 2; // count segwit flags
                        {
-                               let mut sighash_parts = bip143::SigHashCache::new(&mut claim_tx);
+                               let mut sighash_parts = sighash::SighashCache::new(&mut claim_tx);
                                for (idx, inp) in inputs_weight.iter().enumerate() {
                                        sign_input!(sighash_parts, idx, 0, inp, sum_actual_sigs, opt_anchors);
                                        inputs_total_weight += inp;
                                }
                        }
-                       assert_eq!(base_weight + inputs_total_weight as usize,  claim_tx.get_weight() + /* max_length_sig */ (73 * inputs_weight.len() - sum_actual_sigs));
+                       assert_eq!(base_weight + inputs_total_weight as usize,  claim_tx.weight() + /* max_length_sig */ (73 * inputs_weight.len() - sum_actual_sigs));
                }
 
                // Claim tx with 1 offered HTLCs, 3 received HTLCs
@@ -3677,24 +3683,24 @@ mod tests {
                                        },
                                        script_sig: Script::new(),
                                        sequence: 0xfffffffd,
-                                       witness: Vec::new(),
+                                       witness: Witness::new(),
                                });
                        }
                        claim_tx.output.push(TxOut {
                                script_pubkey: script_pubkey.clone(),
                                value: 0,
                        });
-                       let base_weight = claim_tx.get_weight();
+                       let base_weight = claim_tx.weight();
                        let inputs_weight = vec![weight_offered_htlc(opt_anchors), weight_received_htlc(opt_anchors), weight_received_htlc(opt_anchors), weight_received_htlc(opt_anchors)];
                        let mut inputs_total_weight = 2; // count segwit flags
                        {
-                               let mut sighash_parts = bip143::SigHashCache::new(&mut claim_tx);
+                               let mut sighash_parts = sighash::SighashCache::new(&mut claim_tx);
                                for (idx, inp) in inputs_weight.iter().enumerate() {
                                        sign_input!(sighash_parts, idx, 0, inp, sum_actual_sigs, opt_anchors);
                                        inputs_total_weight += inp;
                                }
                        }
-                       assert_eq!(base_weight + inputs_total_weight as usize,  claim_tx.get_weight() + /* max_length_sig */ (73 * inputs_weight.len() - sum_actual_sigs));
+                       assert_eq!(base_weight + inputs_total_weight as usize,  claim_tx.weight() + /* max_length_sig */ (73 * inputs_weight.len() - sum_actual_sigs));
                }
 
                // Justice tx with 1 revoked HTLC-Success tx output
@@ -3708,23 +3714,23 @@ mod tests {
                                },
                                script_sig: Script::new(),
                                sequence: 0xfffffffd,
-                               witness: Vec::new(),
+                               witness: Witness::new(),
                        });
                        claim_tx.output.push(TxOut {
                                script_pubkey: script_pubkey.clone(),
                                value: 0,
                        });
-                       let base_weight = claim_tx.get_weight();
+                       let base_weight = claim_tx.weight();
                        let inputs_weight = vec![WEIGHT_REVOKED_OUTPUT];
                        let mut inputs_total_weight = 2; // count segwit flags
                        {
-                               let mut sighash_parts = bip143::SigHashCache::new(&mut claim_tx);
+                               let mut sighash_parts = sighash::SighashCache::new(&mut claim_tx);
                                for (idx, inp) in inputs_weight.iter().enumerate() {
                                        sign_input!(sighash_parts, idx, 0, inp, sum_actual_sigs, opt_anchors);
                                        inputs_total_weight += inp;
                                }
                        }
-                       assert_eq!(base_weight + inputs_total_weight as usize, claim_tx.get_weight() + /* max_length_isg */ (73 * inputs_weight.len() - sum_actual_sigs));
+                       assert_eq!(base_weight + inputs_total_weight as usize, claim_tx.weight() + /* max_length_sig */ (73 * inputs_weight.len() - sum_actual_sigs));
                }
        }
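
The recurring `bip143::SigHashCache` to `sighash::SighashCache` migration follows one shape throughout these tests (and in keysinterface.rs below). A standalone sketch of that shape, using the calls shown in the hunks plus `Message::from_slice` in place of LDK's internal `hash_to_message!` macro:

    use bitcoin::blockdata::script::Script;
    use bitcoin::blockdata::transaction::{EcdsaSighashType, Transaction};
    use bitcoin::secp256k1::{Message, Secp256k1, SecretKey};
    use bitcoin::util::sighash::SighashCache;

    // Produce the DER signature (with the sighash-type byte appended) for segwit
    // input `idx` of `tx`, ready to be pushed onto that input's witness.
    fn sign_segwit_input(
        tx: &Transaction, idx: usize, redeem_script: &Script, value: u64, key: &SecretKey,
    ) -> Vec<u8> {
        let secp_ctx = Secp256k1::new();
        let sighash = SighashCache::new(tx)
            .segwit_signature_hash(idx, redeem_script, value, EcdsaSighashType::All)
            .expect("input index is in bounds");
        let msg = Message::from_slice(&sighash[..]).expect("sighash is 32 bytes");
        let mut ser_sig = secp_ctx.sign_ecdsa(&msg, key).serialize_der().to_vec();
        ser_sig.push(EcdsaSighashType::All as u8);
        ser_sig
    }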
 
index be31036220a5a720e27d561697aad4a5daa7869c..33c88cf11e38c672d6394bd3027b89b8664bbf0b 100644 (file)
 //! spendable on-chain outputs which the user owns and is responsible for using just as any other
 //! on-chain output which is theirs.
 
-use bitcoin::blockdata::transaction::{Transaction, TxOut, TxIn, SigHashType};
+use bitcoin::blockdata::transaction::{Transaction, TxOut, TxIn, EcdsaSighashType};
 use bitcoin::blockdata::script::{Script, Builder};
 use bitcoin::blockdata::opcodes;
 use bitcoin::network::constants::Network;
 use bitcoin::util::bip32::{ExtendedPrivKey, ExtendedPubKey, ChildNumber};
-use bitcoin::util::bip143;
+use bitcoin::util::sighash;
 
 use bitcoin::bech32::u5;
 use bitcoin::hashes::{Hash, HashEngine};
@@ -25,10 +25,10 @@ use bitcoin::hashes::sha256::Hash as Sha256;
 use bitcoin::hashes::sha256d::Hash as Sha256dHash;
 use bitcoin::hash_types::WPubkeyHash;
 
-use bitcoin::secp256k1::key::{SecretKey, PublicKey};
-use bitcoin::secp256k1::{Secp256k1, Signature, Signing};
-use bitcoin::secp256k1::recovery::RecoverableSignature;
-use bitcoin::secp256k1;
+use bitcoin::secp256k1::{SecretKey, PublicKey};
+use bitcoin::secp256k1::{Secp256k1, ecdsa::Signature, Signing};
+use bitcoin::secp256k1::ecdsa::RecoverableSignature;
+use bitcoin::{secp256k1, Witness};
 
 use util::{byte_utils, transaction_utils};
 use util::crypto::{hkdf_extract_expand_twice, sign};
@@ -588,16 +588,16 @@ impl InMemorySigner {
                if spend_tx.input[input_idx].previous_output != descriptor.outpoint.into_bitcoin_outpoint() { return Err(()); }
 
                let remotepubkey = self.pubkeys().payment_point;
-               let witness_script = bitcoin::Address::p2pkh(&::bitcoin::PublicKey{compressed: true, key: remotepubkey}, Network::Testnet).script_pubkey();
-               let sighash = hash_to_message!(&bip143::SigHashCache::new(spend_tx).signature_hash(input_idx, &witness_script, descriptor.output.value, SigHashType::All)[..]);
+               let witness_script = bitcoin::Address::p2pkh(&::bitcoin::PublicKey{compressed: true, inner: remotepubkey}, Network::Testnet).script_pubkey();
+               let sighash = hash_to_message!(&sighash::SighashCache::new(spend_tx).segwit_signature_hash(input_idx, &witness_script, descriptor.output.value, EcdsaSighashType::All).unwrap()[..]);
                let remotesig = sign(secp_ctx, &sighash, &self.payment_key);
-               let payment_script = bitcoin::Address::p2wpkh(&::bitcoin::PublicKey{compressed: true, key: remotepubkey}, Network::Bitcoin).unwrap().script_pubkey();
+               let payment_script = bitcoin::Address::p2wpkh(&::bitcoin::PublicKey{compressed: true, inner: remotepubkey}, Network::Bitcoin).unwrap().script_pubkey();
 
                if payment_script != descriptor.output.script_pubkey  { return Err(()); }
 
                let mut witness = Vec::with_capacity(2);
                witness.push(remotesig.serialize_der().to_vec());
-               witness[0].push(SigHashType::All as u8);
+               witness[0].push(EcdsaSighashType::All as u8);
                witness.push(remotepubkey.serialize().to_vec());
                Ok(witness)
        }
@@ -623,7 +623,7 @@ impl InMemorySigner {
                        .expect("We constructed the payment_base_key, so we can only fail here if the RNG is busted.");
                let delayed_payment_pubkey = PublicKey::from_secret_key(&secp_ctx, &delayed_payment_key);
                let witness_script = chan_utils::get_revokeable_redeemscript(&descriptor.revocation_pubkey, descriptor.to_self_delay, &delayed_payment_pubkey);
-               let sighash = hash_to_message!(&bip143::SigHashCache::new(spend_tx).signature_hash(input_idx, &witness_script, descriptor.output.value, SigHashType::All)[..]);
+               let sighash = hash_to_message!(&sighash::SighashCache::new(spend_tx).segwit_signature_hash(input_idx, &witness_script, descriptor.output.value, EcdsaSighashType::All).unwrap()[..]);
                let local_delayedsig = sign(secp_ctx, &sighash, &delayed_payment_key);
                let payment_script = bitcoin::Address::p2wsh(&witness_script, Network::Bitcoin).script_pubkey();
 
@@ -631,7 +631,7 @@ impl InMemorySigner {
 
                let mut witness = Vec::with_capacity(3);
                witness.push(local_delayedsig.serialize_der().to_vec());
-               witness[0].push(SigHashType::All as u8);
+               witness[0].push(EcdsaSighashType::All as u8);
                witness.push(vec!()); //MINIMALIF
                witness.push(witness_script.clone().into_bytes());
                Ok(witness)
@@ -670,8 +670,8 @@ impl BaseSign for InMemorySigner {
                for htlc in commitment_tx.htlcs() {
                        let htlc_tx = chan_utils::build_htlc_transaction(&commitment_txid, commitment_tx.feerate_per_kw(), self.holder_selected_contest_delay(), htlc, self.opt_anchors(), &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
                        let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, self.opt_anchors(), &keys);
-                       let htlc_sighashtype = if self.opt_anchors() { SigHashType::SinglePlusAnyoneCanPay } else { SigHashType::All };
-                       let htlc_sighash = hash_to_message!(&bip143::SigHashCache::new(&htlc_tx).signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype)[..]);
+                       let htlc_sighashtype = if self.opt_anchors() { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
+                       let htlc_sighash = hash_to_message!(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]);
                        let holder_htlc_key = chan_utils::derive_private_key(&secp_ctx, &keys.per_commitment_point, &self.htlc_base_key).map_err(|_| ())?;
                        htlc_sigs.push(sign(secp_ctx, &htlc_sighash, &holder_htlc_key));
                }
@@ -712,8 +712,8 @@ impl BaseSign for InMemorySigner {
                        let counterparty_delayedpubkey = chan_utils::derive_public_key(&secp_ctx, &per_commitment_point, &self.counterparty_pubkeys().delayed_payment_basepoint).map_err(|_| ())?;
                        chan_utils::get_revokeable_redeemscript(&revocation_pubkey, self.holder_selected_contest_delay(), &counterparty_delayedpubkey)
                };
-               let mut sighash_parts = bip143::SigHashCache::new(justice_tx);
-               let sighash = hash_to_message!(&sighash_parts.signature_hash(input, &witness_script, amount, SigHashType::All)[..]);
+               let mut sighash_parts = sighash::SighashCache::new(justice_tx);
+               let sighash = hash_to_message!(&sighash_parts.segwit_signature_hash(input, &witness_script, amount, EcdsaSighashType::All).unwrap()[..]);
                return Ok(sign(secp_ctx, &sighash, &revocation_key))
        }
 
@@ -726,8 +726,8 @@ impl BaseSign for InMemorySigner {
                        let holder_htlcpubkey = chan_utils::derive_public_key(&secp_ctx, &per_commitment_point, &self.pubkeys().htlc_basepoint).map_err(|_| ())?;
                        chan_utils::get_htlc_redeemscript_with_explicit_keys(&htlc, self.opt_anchors(), &counterparty_htlcpubkey, &holder_htlcpubkey, &revocation_pubkey)
                };
-               let mut sighash_parts = bip143::SigHashCache::new(justice_tx);
-               let sighash = hash_to_message!(&sighash_parts.signature_hash(input, &witness_script, amount, SigHashType::All)[..]);
+               let mut sighash_parts = sighash::SighashCache::new(justice_tx);
+               let sighash = hash_to_message!(&sighash_parts.segwit_signature_hash(input, &witness_script, amount, EcdsaSighashType::All).unwrap()[..]);
                return Ok(sign(secp_ctx, &sighash, &revocation_key))
        }
 
@@ -740,8 +740,8 @@ impl BaseSign for InMemorySigner {
                                        } else { return Err(()) }
                                } else { return Err(()) }
                        } else { return Err(()) };
-                       let mut sighash_parts = bip143::SigHashCache::new(htlc_tx);
-                       let sighash = hash_to_message!(&sighash_parts.signature_hash(input, &witness_script, amount, SigHashType::All)[..]);
+                       let mut sighash_parts = sighash::SighashCache::new(htlc_tx);
+                       let sighash = hash_to_message!(&sighash_parts.segwit_signature_hash(input, &witness_script, amount, EcdsaSighashType::All).unwrap()[..]);
                        return Ok(sign(secp_ctx, &sighash, &htlc_key))
                }
                Err(())
@@ -884,10 +884,10 @@ impl KeysManager {
                // Note that when we aren't serializing the key, network doesn't matter
                match ExtendedPrivKey::new_master(Network::Testnet, seed) {
                        Ok(master_key) => {
-                               let node_secret = master_key.ckd_priv(&secp_ctx, ChildNumber::from_hardened_idx(0).unwrap()).expect("Your RNG is busted").private_key.key;
+                               let node_secret = master_key.ckd_priv(&secp_ctx, ChildNumber::from_hardened_idx(0).unwrap()).expect("Your RNG is busted").private_key;
                                let destination_script = match master_key.ckd_priv(&secp_ctx, ChildNumber::from_hardened_idx(1).unwrap()) {
                                        Ok(destination_key) => {
-                                               let wpubkey_hash = WPubkeyHash::hash(&ExtendedPubKey::from_private(&secp_ctx, &destination_key).public_key.to_bytes());
+                                               let wpubkey_hash = WPubkeyHash::hash(&ExtendedPubKey::from_priv(&secp_ctx, &destination_key).to_pub().to_bytes());
                                                Builder::new().push_opcode(opcodes::all::OP_PUSHBYTES_0)
                                                              .push_slice(&wpubkey_hash.into_inner())
                                                              .into_script()
@@ -895,12 +895,12 @@ impl KeysManager {
                                        Err(_) => panic!("Your RNG is busted"),
                                };
                                let shutdown_pubkey = match master_key.ckd_priv(&secp_ctx, ChildNumber::from_hardened_idx(2).unwrap()) {
-                                       Ok(shutdown_key) => ExtendedPubKey::from_private(&secp_ctx, &shutdown_key).public_key.key,
+                                       Ok(shutdown_key) => ExtendedPubKey::from_priv(&secp_ctx, &shutdown_key).public_key,
                                        Err(_) => panic!("Your RNG is busted"),
                                };
                                let channel_master_key = master_key.ckd_priv(&secp_ctx, ChildNumber::from_hardened_idx(3).unwrap()).expect("Your RNG is busted");
                                let rand_bytes_master_key = master_key.ckd_priv(&secp_ctx, ChildNumber::from_hardened_idx(4).unwrap()).expect("Your RNG is busted");
-                               let inbound_payment_key: SecretKey = master_key.ckd_priv(&secp_ctx, ChildNumber::from_hardened_idx(5).unwrap()).expect("Your RNG is busted").private_key.key;
+                               let inbound_payment_key: SecretKey = master_key.ckd_priv(&secp_ctx, ChildNumber::from_hardened_idx(5).unwrap()).expect("Your RNG is busted").private_key;
                                let mut inbound_pmt_key_bytes = [0; 32];
                                inbound_pmt_key_bytes.copy_from_slice(&inbound_payment_key[..]);
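
These hunks track the reshaped bip32 types in bitcoin 0.28, where `private_key` is a bare `SecretKey` and the public side comes from `from_priv()`/`to_pub()` (with `.inner` exposing the raw secp256k1 key). A brief sketch under those assumptions:

    use bitcoin::Network;
    use bitcoin::secp256k1::{Secp256k1, SecretKey};
    use bitcoin::util::bip32::{ChildNumber, ExtendedPrivKey, ExtendedPubKey};

    fn bip32_sketch(seed: &[u8; 32]) {
        let secp_ctx = Secp256k1::new();
        let master = ExtendedPrivKey::new_master(Network::Testnet, seed).expect("32-byte seed");
        let child = master
            .ckd_priv(&secp_ctx, ChildNumber::from_hardened_idx(0).unwrap())
            .expect("hardened derivation from a valid xpriv");
        // No more `.private_key.key`: the field is the secp256k1 secret key itself.
        let _node_secret: SecretKey = child.private_key;
        // `from_private(..).public_key.key` becomes `from_priv(..)`, with `.public_key`
        // (raw key) or `.to_pub().inner` yielding the same secp256k1 public key.
        let pubkey = ExtendedPubKey::from_priv(&secp_ctx, &child).to_pub();
        let _raw: bitcoin::secp256k1::PublicKey = pubkey.inner;
    }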
 
@@ -951,7 +951,7 @@ impl KeysManager {
                // entropy, everything else just ensures uniqueness. We rely on the unique_start (ie
                // starting_time provided in the constructor) to be unique.
                let child_privkey = self.channel_master_key.ckd_priv(&self.secp_ctx, ChildNumber::from_hardened_idx(chan_id as u32).expect("key space exhausted")).expect("Your RNG is busted");
-               unique_start.input(&child_privkey.private_key.key[..]);
+               unique_start.input(&child_privkey.private_key[..]);
 
                let seed = Sha256::from_engine(unique_start).into_inner();
 
@@ -1014,7 +1014,7 @@ impl KeysManager {
                                                previous_output: descriptor.outpoint.into_bitcoin_outpoint(),
                                                script_sig: Script::new(),
                                                sequence: 0,
-                                               witness: Vec::new(),
+                                               witness: Witness::new(),
                                        });
                                        witness_weight += StaticPaymentOutputDescriptor::MAX_WITNESS_LENGTH;
                                        input_value += descriptor.output.value;
@@ -1025,7 +1025,7 @@ impl KeysManager {
                                                previous_output: descriptor.outpoint.into_bitcoin_outpoint(),
                                                script_sig: Script::new(),
                                                sequence: descriptor.to_self_delay as u32,
-                                               witness: Vec::new(),
+                                               witness: Witness::new(),
                                        });
                                        witness_weight += DelayedPaymentOutputDescriptor::MAX_WITNESS_LENGTH;
                                        input_value += descriptor.output.value;
@@ -1036,7 +1036,7 @@ impl KeysManager {
                                                previous_output: outpoint.into_bitcoin_outpoint(),
                                                script_sig: Script::new(),
                                                sequence: 0,
-                                               witness: Vec::new(),
+                                               witness: Witness::new(),
                                        });
                                        witness_weight += 1 + 73 + 34;
                                        input_value += output.value;
@@ -1064,7 +1064,7 @@ impl KeysManager {
                                                        self.derive_channel_keys(descriptor.channel_value_satoshis, &descriptor.channel_keys_id),
                                                        descriptor.channel_keys_id));
                                        }
-                                       spend_tx.input[input_idx].witness = keys_cache.as_ref().unwrap().0.sign_counterparty_payment_input(&spend_tx, input_idx, &descriptor, &secp_ctx)?;
+                                       spend_tx.input[input_idx].witness = Witness::from_vec(keys_cache.as_ref().unwrap().0.sign_counterparty_payment_input(&spend_tx, input_idx, &descriptor, &secp_ctx)?);
                                },
                                SpendableOutputDescriptor::DelayedPaymentOutput(descriptor) => {
                                        if keys_cache.is_none() || keys_cache.as_ref().unwrap().1 != descriptor.channel_keys_id {
@@ -1072,7 +1072,7 @@ impl KeysManager {
                                                        self.derive_channel_keys(descriptor.channel_value_satoshis, &descriptor.channel_keys_id),
                                                        descriptor.channel_keys_id));
                                        }
-                                       spend_tx.input[input_idx].witness = keys_cache.as_ref().unwrap().0.sign_dynamic_p2wsh_input(&spend_tx, input_idx, &descriptor, &secp_ctx)?;
+                                       spend_tx.input[input_idx].witness = Witness::from_vec(keys_cache.as_ref().unwrap().0.sign_dynamic_p2wsh_input(&spend_tx, input_idx, &descriptor, &secp_ctx)?);
                                },
                                SpendableOutputDescriptor::StaticOutput { ref output, .. } => {
                                        let derivation_idx = if output.script_pubkey == self.destination_script {
@@ -1092,29 +1092,30 @@ impl KeysManager {
                                                        Err(_) => panic!("Your rng is busted"),
                                                }
                                        };
-                                       let pubkey = ExtendedPubKey::from_private(&secp_ctx, &secret).public_key;
+                                       let pubkey = ExtendedPubKey::from_priv(&secp_ctx, &secret).to_pub();
                                        if derivation_idx == 2 {
-                                               assert_eq!(pubkey.key, self.shutdown_pubkey);
+                                               assert_eq!(pubkey.inner, self.shutdown_pubkey);
                                        }
                                        let witness_script = bitcoin::Address::p2pkh(&pubkey, Network::Testnet).script_pubkey();
                                        let payment_script = bitcoin::Address::p2wpkh(&pubkey, Network::Testnet).expect("uncompressed key found").script_pubkey();
 
                                        if payment_script != output.script_pubkey { return Err(()); };
 
-                                       let sighash = hash_to_message!(&bip143::SigHashCache::new(&spend_tx).signature_hash(input_idx, &witness_script, output.value, SigHashType::All)[..]);
-                                       let sig = sign(secp_ctx, &sighash, &secret.private_key.key);
-                                       spend_tx.input[input_idx].witness.push(sig.serialize_der().to_vec());
-                                       spend_tx.input[input_idx].witness[0].push(SigHashType::All as u8);
-                                       spend_tx.input[input_idx].witness.push(pubkey.key.serialize().to_vec());
+                                       let sighash = hash_to_message!(&sighash::SighashCache::new(&spend_tx).segwit_signature_hash(input_idx, &witness_script, output.value, EcdsaSighashType::All).unwrap()[..]);
+                                       let sig = sign(secp_ctx, &sighash, &secret.private_key);
+                                       let mut sig_ser = sig.serialize_der().to_vec();
+                                       sig_ser.push(EcdsaSighashType::All as u8);
+                                       spend_tx.input[input_idx].witness.push(sig_ser);
+                                       spend_tx.input[input_idx].witness.push(pubkey.inner.serialize().to_vec());
                                },
                        }
                        input_idx += 1;
                }
 
-               debug_assert!(expected_max_weight >= spend_tx.get_weight());
+               debug_assert!(expected_max_weight >= spend_tx.weight());
                // Note that witnesses with a signature vary somewhat in size, so allow
                // `expected_max_weight` to overshoot by up to 3 bytes per input.
-               debug_assert!(expected_max_weight <= spend_tx.get_weight() + descriptors.len() * 3);
+               debug_assert!(expected_max_weight <= spend_tx.weight() + descriptors.len() * 3);
 
                Ok(spend_tx)
        }
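The pattern above recurs throughout this change: rust-bitcoin 0.28's `Witness` no longer exposes its elements as mutable `Vec`s, so the sighash-type byte is appended to the DER-encoded signature before the element is pushed. A minimal sketch of the resulting P2WPKH-style witness, with `sig` and `pubkey` standing in for values produced as above:

use bitcoin::Witness;
use bitcoin::blockdata::transaction::EcdsaSighashType;
use bitcoin::secp256k1::{ecdsa::Signature, PublicKey};

fn p2wpkh_style_witness(sig: &Signature, pubkey: &PublicKey) -> Witness {
    let mut witness = Witness::new();
    let mut sig_ser = sig.serialize_der().to_vec();
    sig_ser.push(EcdsaSighashType::All as u8); // sighash flag lives inside the signature element
    witness.push(sig_ser);
    witness.push(pubkey.serialize().to_vec());
    witness
}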
@@ -1157,7 +1158,7 @@ impl KeysInterface for KeysManager {
 
                let child_ix = self.rand_bytes_child_index.fetch_add(1, Ordering::AcqRel);
                let child_privkey = self.rand_bytes_master_key.ckd_priv(&self.secp_ctx, ChildNumber::from_hardened_idx(child_ix as u32).expect("key space exhausted")).expect("Your RNG is busted");
-               sha.input(&child_privkey.private_key.key[..]);
+               sha.input(&child_privkey.private_key[..]);
 
                sha.input(b"Unique Secure Random Bytes Salt");
                Sha256::from_engine(sha).into_inner()
@@ -1173,7 +1174,7 @@ impl KeysInterface for KeysManager {
                        Recipient::Node => self.get_node_secret(Recipient::Node)?,
                        Recipient::PhantomNode => return Err(()),
                };
-               Ok(self.secp_ctx.sign_recoverable(&hash_to_message!(&Sha256::hash(&preimage)), &secret))
+               Ok(self.secp_ctx.sign_ecdsa_recoverable(&hash_to_message!(&Sha256::hash(&preimage)), &secret))
        }
 }
 
@@ -1241,7 +1242,7 @@ impl KeysInterface for PhantomKeysManager {
        fn sign_invoice(&self, hrp_bytes: &[u8], invoice_data: &[u5], recipient: Recipient) -> Result<RecoverableSignature, ()> {
                let preimage = construct_invoice_preimage(&hrp_bytes, &invoice_data);
                let secret = self.get_node_secret(recipient)?;
-               Ok(self.inner.secp_ctx.sign_recoverable(&hash_to_message!(&Sha256::hash(&preimage)), &secret))
+               Ok(self.inner.secp_ctx.sign_ecdsa_recoverable(&hash_to_message!(&Sha256::hash(&preimage)), &secret))
        }
 }
 
index 0bc205fa16afacfcc3196e4caef3fa3fcb043a56..48aa712f39c1d55263f7cd5129e0b3cce3690d41 100644 (file)
@@ -87,9 +87,20 @@ pub trait Access {
 /// sourcing chain data using a block-oriented API should prefer this interface over [`Confirm`].
 /// Such clients fetch the entire header chain whereas clients using [`Confirm`] only fetch headers
 /// when needed.
+///
+/// By using [`Listen::filtered_block_connected`] this interface supports clients that fetch the
+/// entire header chain but only the blocks containing matching transaction data, e.g. using
+/// BIP 157 filters or other similar filtering.
 pub trait Listen {
+       /// Notifies the listener that a block was added at the given height, with the transaction data
+       /// possibly filtered.
+       fn filtered_block_connected(&self, header: &BlockHeader, txdata: &TransactionData, height: u32);
+
        /// Notifies the listener that a block was added at the given height.
-       fn block_connected(&self, block: &Block, height: u32);
+       fn block_connected(&self, block: &Block, height: u32) {
+               let txdata: Vec<_> = block.txdata.iter().enumerate().collect();
+               self.filtered_block_connected(&block.header, &txdata, height);
+       }
 
        /// Notifies the listener that a block was removed at the given height.
        fn block_disconnected(&self, header: &BlockHeader, height: u32);
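A minimal sketch of an implementor that only provides the new required method and inherits `block_connected` from the default above; the `lightning::chain::transaction::TransactionData` path and the logging bodies are assumptions for illustration:

use bitcoin::blockdata::block::BlockHeader;
use lightning::chain::Listen;
use lightning::chain::transaction::TransactionData;

struct TxLogger;

impl Listen for TxLogger {
    fn filtered_block_connected(&self, header: &BlockHeader, txdata: &TransactionData, height: u32) {
        println!("connected block {} at height {} with {} matched transactions",
            header.block_hash(), height, txdata.len());
    }

    fn block_disconnected(&self, header: &BlockHeader, height: u32) {
        println!("disconnected block {} at height {}", header.block_hash(), height);
    }
}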
@@ -355,8 +366,8 @@ pub struct WatchedOutput {
 }
 
 impl<T: Listen> Listen for core::ops::Deref<Target = T> {
-       fn block_connected(&self, block: &Block, height: u32) {
-               (**self).block_connected(block, height);
+       fn filtered_block_connected(&self, header: &BlockHeader, txdata: &TransactionData, height: u32) {
+               (**self).filtered_block_connected(header, txdata, height);
        }
 
        fn block_disconnected(&self, header: &BlockHeader, height: u32) {
@@ -369,9 +380,9 @@ where
        T::Target: Listen,
        U::Target: Listen,
 {
-       fn block_connected(&self, block: &Block, height: u32) {
-               self.0.block_connected(block, height);
-               self.1.block_connected(block, height);
+       fn filtered_block_connected(&self, header: &BlockHeader, txdata: &TransactionData, height: u32) {
+               self.0.filtered_block_connected(header, txdata, height);
+               self.1.filtered_block_connected(header, txdata, height);
        }
 
        fn block_disconnected(&self, header: &BlockHeader, height: u32) {
index ee6dc9c5c0b6a9fce46d9e610a95bfc872033498..1ca3effabd7e537afc0832cff5085abd3a6abf47 100644 (file)
@@ -18,7 +18,7 @@ use bitcoin::blockdata::script::Script;
 
 use bitcoin::hash_types::Txid;
 
-use bitcoin::secp256k1::{Secp256k1, Signature};
+use bitcoin::secp256k1::{Secp256k1, ecdsa::Signature};
 use bitcoin::secp256k1;
 
 use ln::msgs::DecodeError;
@@ -394,7 +394,7 @@ impl<ChannelSigner: Sign> OnchainTxHandler<ChannelSigner> {
 
                                let transaction = cached_request.finalize_package(self, output_value, self.destination_script.clone(), logger).unwrap();
                                log_trace!(logger, "...with timer {} and feerate {}", new_timer.unwrap(), new_feerate);
-                               assert!(predicted_weight >= transaction.get_weight());
+                               assert!(predicted_weight >= transaction.weight());
                                return Some((new_timer, new_feerate, transaction))
                        }
                } else {
index 1cc63a8c8605338875a4ac3c7a95fab7b365d794..b0961293c07c59a3ab71d51912576add191928da 100644 (file)
 //! also includes witness weight computation and fee computation methods.
 
 use bitcoin::blockdata::constants::WITNESS_SCALE_FACTOR;
-use bitcoin::blockdata::transaction::{TxOut,TxIn, Transaction, SigHashType};
+use bitcoin::blockdata::transaction::{TxOut,TxIn, Transaction, EcdsaSighashType};
 use bitcoin::blockdata::transaction::OutPoint as BitcoinOutPoint;
 use bitcoin::blockdata::script::Script;
 
 use bitcoin::hash_types::Txid;
 
-use bitcoin::secp256k1::key::{SecretKey,PublicKey};
+use bitcoin::secp256k1::{SecretKey,PublicKey};
 
 use ln::PaymentPreimage;
 use ln::chan_utils::{TxCreationKeys, HTLCOutputInCommitment};
@@ -36,6 +36,7 @@ use prelude::*;
 use core::cmp;
 use core::mem;
 use core::ops::Deref;
+use bitcoin::Witness;
 
 const MAX_ALLOC_SIZE: usize = 64*1024;
 
@@ -352,8 +353,9 @@ impl PackageSolvingData {
                                        let witness_script = chan_utils::get_revokeable_redeemscript(&chan_keys.revocation_key, outp.on_counterparty_tx_csv, &chan_keys.broadcaster_delayed_payment_key);
                                        //TODO: should we panic on signer failure ?
                                        if let Ok(sig) = onchain_handler.signer.sign_justice_revoked_output(&bumped_tx, i, outp.amount, &outp.per_commitment_key, &onchain_handler.secp_ctx) {
-                                               bumped_tx.input[i].witness.push(sig.serialize_der().to_vec());
-                                               bumped_tx.input[i].witness[0].push(SigHashType::All as u8);
+                                               let mut ser_sig = sig.serialize_der().to_vec();
+                                               ser_sig.push(EcdsaSighashType::All as u8);
+                                               bumped_tx.input[i].witness.push(ser_sig);
                                                bumped_tx.input[i].witness.push(vec!(1));
                                                bumped_tx.input[i].witness.push(witness_script.clone().into_bytes());
                                        } else { return false; }
@@ -364,8 +366,9 @@ impl PackageSolvingData {
                                        let witness_script = chan_utils::get_htlc_redeemscript_with_explicit_keys(&outp.htlc, onchain_handler.opt_anchors(), &chan_keys.broadcaster_htlc_key, &chan_keys.countersignatory_htlc_key, &chan_keys.revocation_key);
                                        //TODO: should we panic on signer failure ?
                                        if let Ok(sig) = onchain_handler.signer.sign_justice_revoked_htlc(&bumped_tx, i, outp.amount, &outp.per_commitment_key, &outp.htlc, &onchain_handler.secp_ctx) {
-                                               bumped_tx.input[i].witness.push(sig.serialize_der().to_vec());
-                                               bumped_tx.input[i].witness[0].push(SigHashType::All as u8);
+                                               let mut ser_sig = sig.serialize_der().to_vec();
+                                               ser_sig.push(EcdsaSighashType::All as u8);
+                                               bumped_tx.input[i].witness.push(ser_sig);
                                                bumped_tx.input[i].witness.push(chan_keys.revocation_key.clone().serialize().to_vec());
                                                bumped_tx.input[i].witness.push(witness_script.clone().into_bytes());
                                        } else { return false; }
@@ -376,8 +379,9 @@ impl PackageSolvingData {
                                        let witness_script = chan_utils::get_htlc_redeemscript_with_explicit_keys(&outp.htlc, onchain_handler.opt_anchors(), &chan_keys.broadcaster_htlc_key, &chan_keys.countersignatory_htlc_key, &chan_keys.revocation_key);
 
                                        if let Ok(sig) = onchain_handler.signer.sign_counterparty_htlc_transaction(&bumped_tx, i, &outp.htlc.amount_msat / 1000, &outp.per_commitment_point, &outp.htlc, &onchain_handler.secp_ctx) {
-                                               bumped_tx.input[i].witness.push(sig.serialize_der().to_vec());
-                                               bumped_tx.input[i].witness[0].push(SigHashType::All as u8);
+                                               let mut ser_sig = sig.serialize_der().to_vec();
+                                               ser_sig.push(EcdsaSighashType::All as u8);
+                                               bumped_tx.input[i].witness.push(ser_sig);
                                                bumped_tx.input[i].witness.push(outp.preimage.0.to_vec());
                                                bumped_tx.input[i].witness.push(witness_script.clone().into_bytes());
                                        }
@@ -389,8 +393,9 @@ impl PackageSolvingData {
 
                                        bumped_tx.lock_time = outp.htlc.cltv_expiry; // Right now we don't aggregate time-locked transaction, if we do we should set lock_time before to avoid breaking hash computation
                                        if let Ok(sig) = onchain_handler.signer.sign_counterparty_htlc_transaction(&bumped_tx, i, &outp.htlc.amount_msat / 1000, &outp.per_commitment_point, &outp.htlc, &onchain_handler.secp_ctx) {
-                                               bumped_tx.input[i].witness.push(sig.serialize_der().to_vec());
-                                               bumped_tx.input[i].witness[0].push(SigHashType::All as u8);
+                                               let mut ser_sig = sig.serialize_der().to_vec();
+                                               ser_sig.push(EcdsaSighashType::All as u8);
+                                               bumped_tx.input[i].witness.push(ser_sig);
                                                // Due to BIP146 (MINIMALIF) this must be a zero-length element to relay.
                                                bumped_tx.input[i].witness.push(vec![]);
                                                bumped_tx.input[i].witness.push(witness_script.clone().into_bytes());
@@ -620,7 +625,7 @@ impl PackageTemplate {
                                                previous_output: *outpoint,
                                                script_sig: Script::new(),
                                                sequence: 0xfffffffd,
-                                               witness: Vec::new(),
+                                               witness: Witness::new(),
                                        });
                                }
                                for (i, (outpoint, out)) in self.inputs.iter().enumerate() {
@@ -852,7 +857,7 @@ mod tests {
 
        use bitcoin::hashes::hex::FromHex;
 
-       use bitcoin::secp256k1::key::{PublicKey,SecretKey};
+       use bitcoin::secp256k1::{PublicKey,SecretKey};
        use bitcoin::secp256k1::Secp256k1;
 
        macro_rules! dumb_revk_output {
index b31ceacea15852def4203b037d8e36216094f5c4..6b36682f43233f876dcacaf2e1ae34da0fef5e4e 100644 (file)
@@ -362,3 +362,5 @@ fn read_write_lockorder_fail() {
                let _a = a.write().unwrap();
        }
 }
+
+pub type FairRwLock<T> = RwLock<T>;
index 6d4cc50a920cad4cde49b91d0329e227c6ad6379..abdc10c577a4f476b70929dad51499b26f1b0cfe 100644 (file)
@@ -159,6 +159,8 @@ mod sync {
        pub use debug_sync::*;
        #[cfg(not(test))]
        pub use ::std::sync::{Arc, Mutex, Condvar, MutexGuard, RwLock, RwLockReadGuard};
+       #[cfg(not(test))]
+       pub use crate::util::fairrwlock::FairRwLock;
 }
 
 #[cfg(not(feature = "std"))]
index 370c0cc8edfe6737f3f1d65f5dab59ea688ee854..9e987c3deec005debbdb81fd7557a6be75e955f1 100644 (file)
@@ -12,8 +12,8 @@
 
 use bitcoin::blockdata::script::{Script,Builder};
 use bitcoin::blockdata::opcodes;
-use bitcoin::blockdata::transaction::{TxIn,TxOut,OutPoint,Transaction, SigHashType};
-use bitcoin::util::bip143;
+use bitcoin::blockdata::transaction::{TxIn,TxOut,OutPoint,Transaction, EcdsaSighashType};
+use bitcoin::util::sighash;
 
 use bitcoin::hashes::{Hash, HashEngine};
 use bitcoin::hashes::sha256::Hash as Sha256;
@@ -26,10 +26,10 @@ use util::ser::{Readable, Writeable, Writer};
 use util::{byte_utils, transaction_utils};
 
 use bitcoin::hash_types::WPubkeyHash;
-use bitcoin::secp256k1::key::{SecretKey, PublicKey};
-use bitcoin::secp256k1::{Secp256k1, Signature, Message};
+use bitcoin::secp256k1::{SecretKey, PublicKey};
+use bitcoin::secp256k1::{Secp256k1, ecdsa::Signature, Message};
 use bitcoin::secp256k1::Error as SecpError;
-use bitcoin::secp256k1;
+use bitcoin::{secp256k1, Witness};
 
 use io;
 use prelude::*;
@@ -102,7 +102,7 @@ pub fn build_closing_transaction(to_holder_value_sat: u64, to_counterparty_value
                        previous_output: funding_outpoint,
                        script_sig: Script::new(),
                        sequence: 0xffffffff,
-                       witness: Vec::new(),
+                       witness: Witness::new(),
                });
                ins
        };
@@ -615,7 +615,7 @@ pub fn build_htlc_transaction(commitment_txid: &Txid, feerate_per_kw: u32, conte
                },
                script_sig: Script::new(),
                sequence: if opt_anchors { 1 } else { 0 },
-               witness: Vec::new(),
+               witness: Witness::new(),
        });
 
        let weight = if htlc.offered {
@@ -891,16 +891,18 @@ impl HolderCommitmentTransaction {
                // First push the multisig dummy, note that due to BIP147 (NULLDUMMY) it must be a zero-length element.
                let mut tx = self.inner.built.transaction.clone();
                tx.input[0].witness.push(Vec::new());
+               let mut ser_holder_sig = holder_sig.serialize_der().to_vec();
+               ser_holder_sig.push(EcdsaSighashType::All as u8);
+               let mut ser_cp_sig = self.counterparty_sig.serialize_der().to_vec();
+               ser_cp_sig.push(EcdsaSighashType::All as u8);
 
                if self.holder_sig_first {
-                       tx.input[0].witness.push(holder_sig.serialize_der().to_vec());
-                       tx.input[0].witness.push(self.counterparty_sig.serialize_der().to_vec());
+                       tx.input[0].witness.push(ser_holder_sig);
+                       tx.input[0].witness.push(ser_cp_sig);
                } else {
-                       tx.input[0].witness.push(self.counterparty_sig.serialize_der().to_vec());
-                       tx.input[0].witness.push(holder_sig.serialize_der().to_vec());
+                       tx.input[0].witness.push(ser_cp_sig);
+                       tx.input[0].witness.push(ser_holder_sig);
                }
-               tx.input[0].witness[1].push(SigHashType::All as u8);
-               tx.input[0].witness[2].push(SigHashType::All as u8);
 
                tx.input[0].witness.push(funding_redeemscript.as_bytes().to_vec());
                tx
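For reference, the witness stack assembled above for the 2-of-2 funding output, sketched with placeholder signature elements (each already carrying its sighash byte) and the BIP147 (NULLDUMMY) empty element first:

use bitcoin::Witness;
use bitcoin::blockdata::script::Script;

fn two_of_two_witness(first_sig: Vec<u8>, second_sig: Vec<u8>, funding_redeemscript: &Script) -> Witness {
    let mut witness = Witness::new();
    witness.push(Vec::<u8>::new()); // CHECKMULTISIG dummy, must be zero-length (BIP147)
    witness.push(first_sig);
    witness.push(second_sig);
    witness.push(funding_redeemscript.as_bytes().to_vec());
    witness
}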
@@ -929,7 +931,7 @@ impl BuiltCommitmentTransaction {
        ///
        /// This can be used to verify a signature.
        pub fn get_sighash_all(&self, funding_redeemscript: &Script, channel_value_satoshis: u64) -> Message {
-               let sighash = &bip143::SigHashCache::new(&self.transaction).signature_hash(0, funding_redeemscript, channel_value_satoshis, SigHashType::All)[..];
+               let sighash = &sighash::SighashCache::new(&self.transaction).segwit_signature_hash(0, funding_redeemscript, channel_value_satoshis, EcdsaSighashType::All).unwrap()[..];
                hash_to_message!(sighash)
        }
 
@@ -1053,7 +1055,7 @@ impl<'a> TrustedClosingTransaction<'a> {
        ///
        /// This can be used to verify a signature.
        pub fn get_sighash_all(&self, funding_redeemscript: &Script, channel_value_satoshis: u64) -> Message {
-               let sighash = &bip143::SigHashCache::new(&self.inner.built).signature_hash(0, funding_redeemscript, channel_value_satoshis, SigHashType::All)[..];
+               let sighash = &sighash::SighashCache::new(&self.inner.built).segwit_signature_hash(0, funding_redeemscript, channel_value_satoshis, EcdsaSighashType::All).unwrap()[..];
                hash_to_message!(sighash)
        }
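A minimal sketch of the rust-bitcoin 0.28 sighash call used above, assuming `tx`, `redeem_script` and `value` come from the surrounding commitment/closing-transaction context; it computes the BIP143 digest for input 0 and returns its raw bytes:

use bitcoin::blockdata::script::Script;
use bitcoin::blockdata::transaction::{EcdsaSighashType, Transaction};
use bitcoin::util::sighash::SighashCache;

fn segwit_sighash_all(tx: &Transaction, redeem_script: &Script, value: u64) -> [u8; 32] {
    let sighash = SighashCache::new(tx)
        .segwit_signature_hash(0, redeem_script, value, EcdsaSighashType::All)
        .expect("input 0 exists");
    let mut bytes = [0u8; 32];
    bytes.copy_from_slice(&sighash[..]);
    bytes
}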
 
@@ -1291,7 +1293,7 @@ impl CommitmentTransaction {
                                script_sig: Script::new(),
                                sequence: ((0x80 as u32) << 8 * 3)
                                        | ((obscured_commitment_transaction_number >> 3 * 8) as u32),
-                               witness: Vec::new(),
+                               witness: Witness::new(),
                        });
                        ins
                };
@@ -1401,7 +1403,7 @@ impl<'a> TrustedCommitmentTransaction<'a> {
        ///
        /// The returned Vec has one entry for each HTLC, and in the same order.
        ///
-       /// This function is only valid in the holder commitment context, it always uses SigHashType::All.
+       /// This function is only valid in the holder commitment context; it always uses EcdsaSighashType::All.
        pub fn get_htlc_sigs<T: secp256k1::Signing>(&self, htlc_base_key: &SecretKey, channel_parameters: &DirectedChannelTransactionParameters, secp_ctx: &Secp256k1<T>) -> Result<Vec<Signature>, ()> {
                let inner = self.inner;
                let keys = &inner.keys;
@@ -1415,7 +1417,7 @@ impl<'a> TrustedCommitmentTransaction<'a> {
 
                        let htlc_redeemscript = get_htlc_redeemscript_with_explicit_keys(&this_htlc, self.opt_anchors(), &keys.broadcaster_htlc_key, &keys.countersignatory_htlc_key, &keys.revocation_key);
 
-                       let sighash = hash_to_message!(&bip143::SigHashCache::new(&htlc_tx).signature_hash(0, &htlc_redeemscript, this_htlc.amount_msat / 1000, SigHashType::All)[..]);
+                       let sighash = hash_to_message!(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, this_htlc.amount_msat / 1000, EcdsaSighashType::All).unwrap()[..]);
                        ret.push(sign(secp_ctx, &sighash, &holder_htlc_key));
                }
                Ok(ret)
@@ -1437,15 +1439,17 @@ impl<'a> TrustedCommitmentTransaction<'a> {
 
                let htlc_redeemscript = get_htlc_redeemscript_with_explicit_keys(&this_htlc, self.opt_anchors(), &keys.broadcaster_htlc_key, &keys.countersignatory_htlc_key, &keys.revocation_key);
 
-               let sighashtype = if self.opt_anchors() { SigHashType::SinglePlusAnyoneCanPay } else { SigHashType::All };
+               let sighashtype = if self.opt_anchors() { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
 
                // First push the multisig dummy, note that due to BIP147 (NULLDUMMY) it must be a zero-length element.
                htlc_tx.input[0].witness.push(Vec::new());
 
-               htlc_tx.input[0].witness.push(counterparty_signature.serialize_der().to_vec());
-               htlc_tx.input[0].witness.push(signature.serialize_der().to_vec());
-               htlc_tx.input[0].witness[1].push(sighashtype as u8);
-               htlc_tx.input[0].witness[2].push(SigHashType::All as u8);
+               let mut cp_sig_ser = counterparty_signature.serialize_der().to_vec();
+               cp_sig_ser.push(sighashtype as u8);
+               htlc_tx.input[0].witness.push(cp_sig_ser);
+               let mut holder_sig_ser = signature.serialize_der().to_vec();
+               holder_sig_ser.push(EcdsaSighashType::All as u8);
+               htlc_tx.input[0].witness.push(holder_sig_ser);
 
                if this_htlc.offered {
                        // Due to BIP146 (MINIMALIF) this must be a zero-length element to relay.
index b0551bf8323f1617f7cba4c991c783221c778c46..43032c51a3c09cdc67988e96050835fd7a17d9d7 100644 (file)
@@ -8,8 +8,8 @@
 // licenses.
 
 use bitcoin::blockdata::script::{Script,Builder};
-use bitcoin::blockdata::transaction::{Transaction, SigHashType};
-use bitcoin::util::bip143;
+use bitcoin::blockdata::transaction::{Transaction, EcdsaSighashType};
+use bitcoin::util::sighash;
 use bitcoin::consensus::encode;
 
 use bitcoin::hashes::Hash;
@@ -18,8 +18,8 @@ use bitcoin::hashes::sha256d::Hash as Sha256d;
 use bitcoin::hash_types::{Txid, BlockHash};
 
 use bitcoin::secp256k1::constants::PUBLIC_KEY_SIZE;
-use bitcoin::secp256k1::key::{PublicKey,SecretKey};
-use bitcoin::secp256k1::{Secp256k1,Signature};
+use bitcoin::secp256k1::{PublicKey,SecretKey};
+use bitcoin::secp256k1::{Secp256k1,ecdsa::Signature};
 use bitcoin::secp256k1;
 
 use ln::{PaymentPreimage, PaymentHash};
@@ -39,7 +39,7 @@ use util::events::ClosureReason;
 use util::ser::{Readable, ReadableArgs, Writeable, Writer, VecWriter};
 use util::logger::Logger;
 use util::errors::APIError;
-use util::config::{UserConfig, ChannelConfig, ChannelHandshakeLimits};
+use util::config::{UserConfig, ChannelConfig, ChannelHandshakeConfig, ChannelHandshakeLimits};
 use util::scid_utils::scid_from_parts;
 
 use io;
@@ -62,6 +62,17 @@ pub struct ChannelValueStat {
        pub counterparty_dust_limit_msat: u64,
 }
 
+pub struct AvailableBalances {
+       /// The amount that would go to us if we close the channel, ignoring any on-chain fees.
+       pub balance_msat: u64,
+       /// Total amount available for our counterparty to send to us.
+       pub inbound_capacity_msat: u64,
+       /// Total amount available for us to send to our counterparty.
+       pub outbound_capacity_msat: u64,
+       /// The maximum value we can assign to the next outbound HTLC
+       pub next_outbound_htlc_limit_msat: u64,
+}
+
 #[derive(Debug, Clone, Copy, PartialEq)]
 enum FeeUpdateState {
        // Inbound states mirroring InboundHTLCState
@@ -734,9 +745,19 @@ pub const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172;
 
 pub const ANCHOR_OUTPUT_VALUE_SATOSHI: u64 = 330;
 
-/// Maximum `funding_satoshis` value, according to the BOLT #2 specification
-/// it's 2^24.
-pub const MAX_FUNDING_SATOSHIS: u64 = 1 << 24;
+/// The percentage of the channel value `holder_max_htlc_value_in_flight_msat` used to be set to,
+/// before this was made configurable. The percentage was made configurable in LDK 0.0.107,
+/// although LDK 0.0.104+ enabled serialization of channels with a different value set for
+/// `holder_max_htlc_value_in_flight_msat`.
+pub const MAX_IN_FLIGHT_PERCENT_LEGACY: u8 = 10;
+
+/// Maximum `funding_satoshis` value according to the BOLT #2 specification, if
+/// `option_support_large_channel` (aka wumbo channels) is not supported.
+/// It's 2^24 - 1.
+pub const MAX_FUNDING_SATOSHIS_NO_WUMBO: u64 = (1 << 24) - 1;
+
+/// Total bitcoin supply in satoshis.
+pub const TOTAL_BITCOIN_SUPPLY_SATOSHIS: u64 = 21_000_000 * 100_000_000;
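As a rough illustration of how these constants are used (mirroring the open-channel checks further below); the function name and boolean return are illustrative only:

const MAX_FUNDING_SATOSHIS_NO_WUMBO: u64 = (1 << 24) - 1; // 16_777_215 sats (~0.168 BTC)
const TOTAL_BITCOIN_SUPPLY_SATOSHIS: u64 = 21_000_000 * 100_000_000;

fn funding_value_acceptable(channel_value_satoshis: u64, peer_supports_wumbo: bool) -> bool {
    if !peer_supports_wumbo && channel_value_satoshis > MAX_FUNDING_SATOSHIS_NO_WUMBO {
        return false;
    }
    channel_value_satoshis < TOTAL_BITCOIN_SUPPLY_SATOSHIS
}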
 
 /// The maximum network dust limit for standard script formats. This currently represents the
 /// minimum output value for a P2SH output before Bitcoin Core 22 considers the entire
@@ -788,9 +809,22 @@ macro_rules! secp_check {
 }
 
 impl<Signer: Sign> Channel<Signer> {
-       // Convert constants + channel value to limits:
-       fn get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis: u64) -> u64 {
-               channel_value_satoshis * 1000 / 10 //TODO
+       /// Returns the value to use for `holder_max_htlc_value_in_flight_msat` as a percentage of the
+       /// `channel_value_satoshis` in msat, set through
+       /// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`]
+       ///
+       /// The effective percentage is lower bounded by 1% and upper bounded by 100%.
+       ///
+       /// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`]: crate::util::config::ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel
+       fn get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis: u64, config: &ChannelHandshakeConfig) -> u64 {
+               let configured_percent = if config.max_inbound_htlc_value_in_flight_percent_of_channel < 1 {
+                       1
+               } else if config.max_inbound_htlc_value_in_flight_percent_of_channel > 100 {
+                       100
+               } else {
+                       config.max_inbound_htlc_value_in_flight_percent_of_channel as u64
+               };
+               channel_value_satoshis * 10 * configured_percent
        }
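A quick worked check of the arithmetic above (values illustrative): multiplying sats by 10 and by the clamped percentage is the same as converting to msat and taking that percentage of it:

fn max_in_flight_example() {
    let channel_value_satoshis: u64 = 1_000_000;
    let configured_percent: u64 = 25; // already clamped to the 1..=100 range
    let max_in_flight_msat = channel_value_satoshis * 10 * configured_percent;
    assert_eq!(max_in_flight_msat, channel_value_satoshis * 1000 * configured_percent / 100);
    assert_eq!(max_in_flight_msat, 250_000_000);
}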
 
        /// Returns a minimum channel reserve value the remote needs to maintain,
@@ -850,8 +884,11 @@ impl<Signer: Sign> Channel<Signer> {
                let holder_signer = keys_provider.get_channel_signer(false, channel_value_satoshis);
                let pubkeys = holder_signer.pubkeys().clone();
 
-               if channel_value_satoshis >= MAX_FUNDING_SATOSHIS {
-                       return Err(APIError::APIMisuseError{err: format!("funding_value must be smaller than {}, it was {}", MAX_FUNDING_SATOSHIS, channel_value_satoshis)});
+               if !their_features.supports_wumbo() && channel_value_satoshis > MAX_FUNDING_SATOSHIS_NO_WUMBO {
+                       return Err(APIError::APIMisuseError{err: format!("funding_value must not exceed {}, it was {}", MAX_FUNDING_SATOSHIS_NO_WUMBO, channel_value_satoshis)});
+               }
+               if channel_value_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
+                       return Err(APIError::APIMisuseError{err: format!("funding_value must be smaller than the total bitcoin supply, it was {}", channel_value_satoshis)});
                }
                let channel_value_msat = channel_value_satoshis * 1000;
                if push_msat > channel_value_msat {
@@ -946,7 +983,7 @@ impl<Signer: Sign> Channel<Signer> {
                        counterparty_dust_limit_satoshis: 0,
                        holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
                        counterparty_max_htlc_value_in_flight_msat: 0,
-                       holder_max_htlc_value_in_flight_msat: Self::get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis),
+                       holder_max_htlc_value_in_flight_msat: Self::get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &config.own_channel_config),
                        counterparty_selected_channel_reserve_satoshis: None, // Filled in in accept_channel
                        holder_selected_channel_reserve_satoshis,
                        counterparty_htlc_minimum_msat: 0,
@@ -1076,20 +1113,22 @@ impl<Signer: Sign> Channel<Signer> {
                }
 
                // Check sanity of message fields:
-               if msg.funding_satoshis >= MAX_FUNDING_SATOSHIS {
-                       return Err(ChannelError::Close(format!("Funding must be smaller than {}. It was {}", MAX_FUNDING_SATOSHIS, msg.funding_satoshis)));
+               if msg.funding_satoshis > config.peer_channel_config_limits.max_funding_satoshis {
+                       return Err(ChannelError::Close(format!("Per our config, funding must be at most {}. It was {}", config.peer_channel_config_limits.max_funding_satoshis, msg.funding_satoshis)));
+               }
+               if msg.funding_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
+                       return Err(ChannelError::Close(format!("Funding must be smaller than the total bitcoin supply. It was {}", msg.funding_satoshis)));
                }
                if msg.channel_reserve_satoshis > msg.funding_satoshis {
                        return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must be not greater than funding_satoshis: {}", msg.channel_reserve_satoshis, msg.funding_satoshis)));
                }
-               let funding_value = (msg.funding_satoshis - msg.channel_reserve_satoshis) * 1000;
-               if msg.push_msat > funding_value {
-                       return Err(ChannelError::Close(format!("push_msat {} was larger than funding value {}", msg.push_msat, funding_value)));
+               let full_channel_value_msat = (msg.funding_satoshis - msg.channel_reserve_satoshis) * 1000;
+               if msg.push_msat > full_channel_value_msat {
+                       return Err(ChannelError::Close(format!("push_msat {} was larger than channel amount minus reserve ({})", msg.push_msat, full_channel_value_msat)));
                }
                if msg.dust_limit_satoshis > msg.funding_satoshis {
                        return Err(ChannelError::Close(format!("dust_limit_satoshis {} was larger than funding_satoshis {}. Peer never wants payout outputs?", msg.dust_limit_satoshis, msg.funding_satoshis)));
                }
-               let full_channel_value_msat = (msg.funding_satoshis - msg.channel_reserve_satoshis) * 1000;
                if msg.htlc_minimum_msat >= full_channel_value_msat {
                        return Err(ChannelError::Close(format!("Minimum htlc value ({}) was larger than full channel value ({})", msg.htlc_minimum_msat, full_channel_value_msat)));
                }
@@ -1143,6 +1182,9 @@ impl<Signer: Sign> Channel<Signer> {
                if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
                        return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({}). dust_limit_satoshis is ({}).", holder_selected_channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
                }
+               if holder_selected_channel_reserve_satoshis * 1000 >= full_channel_value_msat {
+                       return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({}). Channel value is ({} - {}).", holder_selected_channel_reserve_satoshis, full_channel_value_msat, msg.push_msat)));
+               }
                if msg.channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
                        log_debug!(logger, "channel_reserve_satoshis ({}) is smaller than our dust limit ({}). We can broadcast stale states without any risk, implying this channel is very insecure for our counterparty.",
                                msg.channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS);
@@ -1259,7 +1301,7 @@ impl<Signer: Sign> Channel<Signer> {
                        counterparty_dust_limit_satoshis: msg.dust_limit_satoshis,
                        holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
                        counterparty_max_htlc_value_in_flight_msat: cmp::min(msg.max_htlc_value_in_flight_msat, msg.funding_satoshis * 1000),
-                       holder_max_htlc_value_in_flight_msat: Self::get_holder_max_htlc_value_in_flight_msat(msg.funding_satoshis),
+                       holder_max_htlc_value_in_flight_msat: Self::get_holder_max_htlc_value_in_flight_msat(msg.funding_satoshis, &config.own_channel_config),
                        counterparty_selected_channel_reserve_satoshis: Some(msg.channel_reserve_satoshis),
                        holder_selected_channel_reserve_satoshis,
                        counterparty_htlc_minimum_msat: msg.htlc_minimum_msat,
@@ -1904,6 +1946,10 @@ impl<Signer: Sign> Channel<Signer> {
                if msg.dust_limit_satoshis > self.holder_selected_channel_reserve_satoshis {
                        return Err(ChannelError::Close(format!("Dust limit ({}) is bigger than our channel reserve ({})", msg.dust_limit_satoshis, self.holder_selected_channel_reserve_satoshis)));
                }
+               if msg.channel_reserve_satoshis > self.channel_value_satoshis - self.holder_selected_channel_reserve_satoshis {
+                       return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than channel value minus our reserve ({})",
+                               msg.channel_reserve_satoshis, self.channel_value_satoshis - self.holder_selected_channel_reserve_satoshis)));
+               }
                let full_channel_value_msat = (self.channel_value_satoshis - msg.channel_reserve_satoshis) * 1000;
                if msg.htlc_minimum_msat >= full_channel_value_msat {
                        return Err(ChannelError::Close(format!("Minimum htlc value ({}) is full channel value ({})", msg.htlc_minimum_msat, full_channel_value_msat)));
@@ -2021,7 +2067,7 @@ impl<Signer: Sign> Channel<Signer> {
                                log_bytes!(sig.serialize_compact()[..]), log_bytes!(self.counterparty_funding_pubkey().serialize()),
                                encode::serialize_hex(&initial_commitment_bitcoin_tx.transaction), log_bytes!(sighash[..]),
                                encode::serialize_hex(&funding_script), log_bytes!(self.channel_id()));
-                       secp_check!(self.secp_ctx.verify(&sighash, &sig, self.counterparty_funding_pubkey()), "Invalid funding_created signature from peer".to_owned());
+                       secp_check!(self.secp_ctx.verify_ecdsa(&sighash, &sig, self.counterparty_funding_pubkey()), "Invalid funding_created signature from peer".to_owned());
                }
 
                let counterparty_keys = self.build_remote_transaction_keys()?;
@@ -2153,7 +2199,7 @@ impl<Signer: Sign> Channel<Signer> {
                        let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
                        let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.channel_value_satoshis);
                        // They sign our commitment transaction, allowing us to broadcast the tx if we wish.
-                       if let Err(_) = self.secp_ctx.verify(&sighash, &msg.signature, &self.get_counterparty_pubkeys().funding_pubkey) {
+                       if let Err(_) = self.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.get_counterparty_pubkeys().funding_pubkey) {
                                return Err(ChannelError::Close("Invalid funding_signed signature from peer".to_owned()));
                        }
                }
@@ -2330,40 +2376,39 @@ impl<Signer: Sign> Channel<Signer> {
                stats
        }
 
-       /// Get the available (ie not including pending HTLCs) inbound and outbound balance in msat.
+       /// Get the available balances, see [`AvailableBalances`]'s fields for more info.
        /// Doesn't bother handling the
        /// if-we-removed-it-already-but-haven't-fully-resolved-they-can-still-send-an-inbound-HTLC
        /// corner case properly.
-       /// The channel reserve is subtracted from each balance.
-       /// See also [`Channel::get_balance_msat`]
-       pub fn get_inbound_outbound_available_balance_msat(&self) -> (u64, u64) {
+       pub fn get_available_balances(&self) -> AvailableBalances {
                // Note that we have to handle overflow due to the above case.
-               (
-                       cmp::max(self.channel_value_satoshis as i64 * 1000
-                               - self.value_to_self_msat as i64
-                               - self.get_inbound_pending_htlc_stats(None).pending_htlcs_value_msat as i64
-                               - self.holder_selected_channel_reserve_satoshis as i64 * 1000,
-                       0) as u64,
-                       cmp::max(self.value_to_self_msat as i64
-                               - self.get_outbound_pending_htlc_stats(None).pending_htlcs_value_msat as i64
-                               - self.counterparty_selected_channel_reserve_satoshis.unwrap_or(0) as i64 * 1000,
-                       0) as u64
-               )
-       }
+               let outbound_stats = self.get_outbound_pending_htlc_stats(None);
 
-       /// Get our total balance in msat.
-       /// This is the amount that would go to us if we close the channel, ignoring any on-chain fees.
-       /// See also [`Channel::get_inbound_outbound_available_balance_msat`]
-       pub fn get_balance_msat(&self) -> u64 {
-               // Include our local balance, plus any inbound HTLCs we know the preimage for, minus any
-               // HTLCs sent or which will be sent after commitment signed's are exchanged.
                let mut balance_msat = self.value_to_self_msat;
                for ref htlc in self.pending_inbound_htlcs.iter() {
                        if let InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(_)) = htlc.state {
                                balance_msat += htlc.amount_msat;
                        }
                }
-               balance_msat - self.get_outbound_pending_htlc_stats(None).pending_htlcs_value_msat
+               balance_msat -= outbound_stats.pending_htlcs_value_msat;
+
+               let outbound_capacity_msat = cmp::max(self.value_to_self_msat as i64
+                               - outbound_stats.pending_htlcs_value_msat as i64
+                               - self.counterparty_selected_channel_reserve_satoshis.unwrap_or(0) as i64 * 1000,
+                       0) as u64;
+               AvailableBalances {
+                       inbound_capacity_msat: cmp::max(self.channel_value_satoshis as i64 * 1000
+                                       - self.value_to_self_msat as i64
+                                       - self.get_inbound_pending_htlc_stats(None).pending_htlcs_value_msat as i64
+                                       - self.holder_selected_channel_reserve_satoshis as i64 * 1000,
+                               0) as u64,
+                       outbound_capacity_msat,
+                       next_outbound_htlc_limit_msat: cmp::max(cmp::min(outbound_capacity_msat as i64,
+                                       self.counterparty_max_htlc_value_in_flight_msat as i64
+                                               - outbound_stats.pending_htlcs_value_msat as i64),
+                               0) as u64,
+                       balance_msat,
+               }
        }
 
        pub fn get_holder_counterparty_selected_channel_reserve_satoshis(&self) -> (u64, Option<u64>) {
@@ -2792,7 +2837,7 @@ impl<Signer: Sign> Channel<Signer> {
                                log_bytes!(msg.signature.serialize_compact()[..]),
                                log_bytes!(self.counterparty_funding_pubkey().serialize()), encode::serialize_hex(&bitcoin_tx.transaction),
                                log_bytes!(sighash[..]), encode::serialize_hex(&funding_script), log_bytes!(self.channel_id()));
-                       if let Err(_) = self.secp_ctx.verify(&sighash, &msg.signature, &self.counterparty_funding_pubkey()) {
+                       if let Err(_) = self.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.counterparty_funding_pubkey()) {
                                return Err((None, ChannelError::Close("Invalid commitment tx signature from peer".to_owned())));
                        }
                        bitcoin_tx.txid
@@ -2842,12 +2887,12 @@ impl<Signer: Sign> Channel<Signer> {
                                        &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
 
                                let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, self.opt_anchors(), &keys);
-                               let htlc_sighashtype = if self.opt_anchors() { SigHashType::SinglePlusAnyoneCanPay } else { SigHashType::All };
-                               let htlc_sighash = hash_to_message!(&bip143::SigHashCache::new(&htlc_tx).signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype)[..]);
+                               let htlc_sighashtype = if self.opt_anchors() { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
+                               let htlc_sighash = hash_to_message!(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]);
                                log_trace!(logger, "Checking HTLC tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}.",
                                        log_bytes!(msg.htlc_signatures[idx].serialize_compact()[..]), log_bytes!(keys.countersignatory_htlc_key.serialize()),
                                        encode::serialize_hex(&htlc_tx), log_bytes!(htlc_sighash[..]), encode::serialize_hex(&htlc_redeemscript), log_bytes!(self.channel_id()));
-                               if let Err(_) = self.secp_ctx.verify(&htlc_sighash, &msg.htlc_signatures[idx], &keys.countersignatory_htlc_key) {
+                               if let Err(_) = self.secp_ctx.verify_ecdsa(&htlc_sighash, &msg.htlc_signatures[idx], &keys.countersignatory_htlc_key) {
                                        return Err((None, ChannelError::Close("Invalid HTLC tx signature from peer".to_owned())));
                                }
                                htlcs_and_sigs.push((htlc, Some(msg.htlc_signatures[idx]), source));
@@ -3715,6 +3760,15 @@ impl<Signer: Sign> Channel<Signer> {
                        }
                }
 
+               // Before we change the state of the channel, we check whether the peer is sending a very
+               // old commitment transaction number; if so, we send a warning message.
+               let our_commitment_transaction = INITIAL_COMMITMENT_NUMBER - self.cur_holder_commitment_transaction_number - 1;
+               if msg.next_remote_commitment_number + 1 < our_commitment_transaction {
+                       return Err(
+                               ChannelError::Warn(format!("Peer attempted to reestablish channel with a very old local commitment transaction: {} (received) vs {} (expected)", msg.next_remote_commitment_number, our_commitment_transaction))
+                       );
+               }
+
                // Go ahead and unmark PeerDisconnected as various calls we may make check for it (and all
                // remaining cases either succeed or ErrorMessage-fail).
                self.channel_state &= !(ChannelState::PeerDisconnected as u32);
@@ -4084,15 +4138,17 @@ impl<Signer: Sign> Channel<Signer> {
 
                let funding_key = self.get_holder_pubkeys().funding_pubkey.serialize();
                let counterparty_funding_key = self.counterparty_funding_pubkey().serialize();
+               let mut holder_sig = sig.serialize_der().to_vec();
+               holder_sig.push(EcdsaSighashType::All as u8);
+               let mut cp_sig = counterparty_sig.serialize_der().to_vec();
+               cp_sig.push(EcdsaSighashType::All as u8);
                if funding_key[..] < counterparty_funding_key[..] {
-                       tx.input[0].witness.push(sig.serialize_der().to_vec());
-                       tx.input[0].witness.push(counterparty_sig.serialize_der().to_vec());
+                       tx.input[0].witness.push(holder_sig);
+                       tx.input[0].witness.push(cp_sig);
                } else {
-                       tx.input[0].witness.push(counterparty_sig.serialize_der().to_vec());
-                       tx.input[0].witness.push(sig.serialize_der().to_vec());
+                       tx.input[0].witness.push(cp_sig);
+                       tx.input[0].witness.push(holder_sig);
                }
-               tx.input[0].witness[1].push(SigHashType::All as u8);
-               tx.input[0].witness[2].push(SigHashType::All as u8);
 
                tx.input[0].witness.push(self.get_funding_redeemscript().into_bytes());
                tx
@@ -4110,7 +4166,7 @@ impl<Signer: Sign> Channel<Signer> {
                if !self.pending_inbound_htlcs.is_empty() || !self.pending_outbound_htlcs.is_empty() {
                        return Err(ChannelError::Close("Remote end sent us a closing_signed while there were still pending HTLCs".to_owned()));
                }
-               if msg.fee_satoshis > 21_000_000 * 1_0000_0000 { //this is required to stop potential overflow in build_closing_transaction
+               if msg.fee_satoshis > TOTAL_BITCOIN_SUPPLY_SATOSHIS { // this is required to stop potential overflow in build_closing_transaction
                        return Err(ChannelError::Close("Remote tried to send us a closing tx with > 21 million BTC fee".to_owned()));
                }
 
@@ -4130,14 +4186,14 @@ impl<Signer: Sign> Channel<Signer> {
                }
                let sighash = closing_tx.trust().get_sighash_all(&funding_redeemscript, self.channel_value_satoshis);
 
-               match self.secp_ctx.verify(&sighash, &msg.signature, &self.get_counterparty_pubkeys().funding_pubkey) {
+               match self.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.get_counterparty_pubkeys().funding_pubkey) {
                        Ok(_) => {},
                        Err(_e) => {
                                // The remote end may have decided to revoke their output due to inconsistent dust
                                // limits, so check for that case by re-checking the signature here.
                                closing_tx = self.build_closing_transaction(msg.fee_satoshis, true).0;
                                let sighash = closing_tx.trust().get_sighash_all(&funding_redeemscript, self.channel_value_satoshis);
-                               secp_check!(self.secp_ctx.verify(&sighash, &msg.signature, self.counterparty_funding_pubkey()), "Invalid closing tx signature from peer".to_owned());
+                               secp_check!(self.secp_ctx.verify_ecdsa(&sighash, &msg.signature, self.counterparty_funding_pubkey()), "Invalid closing tx signature from peer".to_owned());
                        },
                };
 
@@ -4338,7 +4394,7 @@ impl<Signer: Sign> Channel<Signer> {
                        // channel might have been used to route very small values (either by honest users or as DoS).
                        self.channel_value_satoshis * 1000 * 9 / 10,
 
-                       self.holder_max_htlc_value_in_flight_msat
+                       self.counterparty_max_htlc_value_in_flight_msat
                );
        }
 
@@ -4698,10 +4754,14 @@ impl<Signer: Sign> Channel<Signer> {
                        }
 
                        // If we've sent funding_locked (or have both sent and received funding_locked), and
-                       // the funding transaction's confirmation count has dipped below minimum_depth / 2,
+                       // the funding transaction has become unconfirmed,
                        // close the channel and hope we can get the latest state on chain (because presumably
                        // the funding transaction is at least still in the mempool of most nodes).
-                       if funding_tx_confirmations < self.minimum_depth.unwrap() as i64 / 2 {
+                       //
+                       // Note that ideally we wouldn't force-close if we see *any* reorg on a 1-conf channel,
+                       // but not doing so may lead to the `ChannelManager::short_to_id` map being
+                       // inconsistent, so we currently have to.
+                       if funding_tx_confirmations == 0 && self.funding_tx_confirmed_in.is_some() {
                                let err_reason = format!("Funding transaction was un-confirmed. Locked at {} confs, now have {} confs.",
                                        self.minimum_depth.unwrap(), funding_tx_confirmations);
                                return Err(ClosureReason::ProcessingError { err: err_reason });
@@ -5023,12 +5083,12 @@ impl<Signer: Sign> Channel<Signer> {
 
                let msghash = hash_to_message!(&Sha256d::hash(&announcement.encode()[..])[..]);
 
-               if self.secp_ctx.verify(&msghash, &msg.node_signature, &self.get_counterparty_node_id()).is_err() {
+               if self.secp_ctx.verify_ecdsa(&msghash, &msg.node_signature, &self.get_counterparty_node_id()).is_err() {
                        return Err(ChannelError::Close(format!(
                                "Bad announcement_signatures. Failed to verify node_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_node_key is {:?}",
                                 &announcement, self.get_counterparty_node_id())));
                }
-               if self.secp_ctx.verify(&msghash, &msg.bitcoin_signature, self.counterparty_funding_pubkey()).is_err() {
+               if self.secp_ctx.verify_ecdsa(&msghash, &msg.bitcoin_signature, self.counterparty_funding_pubkey()).is_err() {
                        return Err(ChannelError::Close(format!(
                                "Bad announcement_signatures. Failed to verify bitcoin_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_bitcoin_key is ({:?})",
                                &announcement, self.counterparty_funding_pubkey())));
@@ -5855,13 +5915,18 @@ impl<Signer: Sign> Writeable for Channel<Signer> {
                let chan_type = if self.channel_type != ChannelTypeFeatures::only_static_remote_key() {
                        Some(&self.channel_type) } else { None };
 
-               // The same logic applies for `holder_selected_channel_reserve_satoshis` and
-               // `holder_max_htlc_value_in_flight_msat` values other than the defaults.
+               // The same logic applies for `holder_selected_channel_reserve_satoshis` values other than
+               // the default, and when `holder_max_htlc_value_in_flight_msat` is configured to be set to
+               // a different percentage of the channel value than 10%, which older versions of LDK used
+               // to set it to before the percentage was made configurable.
                let serialized_holder_selected_reserve =
                        if self.holder_selected_channel_reserve_satoshis != Self::get_holder_selected_channel_reserve_satoshis(self.channel_value_satoshis)
                        { Some(self.holder_selected_channel_reserve_satoshis) } else { None };
+
+               let mut old_max_in_flight_percent_config = UserConfig::default().own_channel_config;
+               old_max_in_flight_percent_config.max_inbound_htlc_value_in_flight_percent_of_channel = MAX_IN_FLIGHT_PERCENT_LEGACY;
                let serialized_holder_htlc_max_in_flight =
-                       if self.holder_max_htlc_value_in_flight_msat != Self::get_holder_max_htlc_value_in_flight_msat(self.channel_value_satoshis)
+                       if self.holder_max_htlc_value_in_flight_msat != Self::get_holder_max_htlc_value_in_flight_msat(self.channel_value_satoshis, &old_max_in_flight_percent_config)
                        { Some(self.holder_max_htlc_value_in_flight_msat) } else { None };
 
                write_tlv_fields!(writer, {
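
For reference, the cap being compared against above is a configurable percentage of the channel value (clamped to 1-100%), where older LDK versions always used 10%. A minimal stand-alone sketch of that arithmetic, assuming the clamping behaviour asserted by the new test further down (helper name hypothetical):

    // Rough sketch of the value Channel::get_holder_max_htlc_value_in_flight_msat computes.
    fn max_in_flight_msat(channel_value_satoshis: u64, configured_percent: u8) -> u64 {
        // Percentages outside 1..=100 are clamped, matching the test expectations below.
        let percent = u64::from(configured_percent).max(1).min(100);
        channel_value_satoshis * 1000 * percent / 100
    }

    fn main() {
        // Legacy 10% default: a 10_000_000-sat channel caps in-flight HTLCs at 1_000_000_000 msat.
        assert_eq!(max_in_flight_msat(10_000_000, 10), 1_000_000_000);
        // The 2% configuration used in the new test caps the same channel at 200_000_000 msat.
        assert_eq!(max_in_flight_msat(10_000_000, 2), 200_000_000);
    }
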
@@ -6131,7 +6196,7 @@ impl<'a, Signer: Sign, K: Deref> ReadableArgs<(&'a K, u32)> for Channel<Signer>
                let mut target_closing_feerate_sats_per_kw = None;
                let mut monitor_pending_finalized_fulfills = Some(Vec::new());
                let mut holder_selected_channel_reserve_satoshis = Some(Self::get_holder_selected_channel_reserve_satoshis(channel_value_satoshis));
-               let mut holder_max_htlc_value_in_flight_msat = Some(Self::get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis));
+               let mut holder_max_htlc_value_in_flight_msat = Some(Self::get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &UserConfig::default().own_channel_config));
                // Prior to supporting channel type negotiation, all of our channels were static_remotekey
                // only, so we default to that if none was written.
                let mut channel_type = Some(ChannelTypeFeatures::only_static_remote_key());
@@ -6317,7 +6382,7 @@ mod tests {
        use ln::PaymentHash;
        use ln::channelmanager::{HTLCSource, PaymentId};
        use ln::channel::{Channel, InboundHTLCOutput, OutboundHTLCOutput, InboundHTLCState, OutboundHTLCState, HTLCCandidate, HTLCInitiator};
-       use ln::channel::MAX_FUNDING_SATOSHIS;
+       use ln::channel::{MAX_FUNDING_SATOSHIS_NO_WUMBO, TOTAL_BITCOIN_SUPPLY_SATOSHIS};
        use ln::features::InitFeatures;
        use ln::msgs::{ChannelUpdate, DataLossProtect, DecodeError, OptionalField, UnsignedChannelUpdate};
        use ln::script::ShutdownScript;
@@ -6332,15 +6397,15 @@ mod tests {
        use util::errors::APIError;
        use util::test_utils;
        use util::test_utils::OnGetShutdownScriptpubkey;
-       use bitcoin::secp256k1::{Secp256k1, Signature};
+       use bitcoin::secp256k1::{Secp256k1, ecdsa::Signature};
        use bitcoin::secp256k1::ffi::Signature as FFISignature;
-       use bitcoin::secp256k1::key::{SecretKey,PublicKey};
-       use bitcoin::secp256k1::recovery::RecoverableSignature;
+       use bitcoin::secp256k1::{SecretKey,PublicKey};
+       use bitcoin::secp256k1::ecdsa::RecoverableSignature;
        use bitcoin::hashes::sha256::Hash as Sha256;
        use bitcoin::hashes::Hash;
        use bitcoin::hash_types::WPubkeyHash;
-       use core::num::NonZeroU8;
        use bitcoin::bech32::u5;
+       use bitcoin::util::address::WitnessVersion;
        use prelude::*;
 
        struct TestFeeEstimator {
@@ -6353,9 +6418,10 @@ mod tests {
        }
 
        #[test]
-       fn test_max_funding_satoshis() {
-               assert!(MAX_FUNDING_SATOSHIS <= 21_000_000 * 100_000_000,
-                       "MAX_FUNDING_SATOSHIS is greater than all satoshis in existence");
+       fn test_max_funding_satoshis_no_wumbo() {
+               assert_eq!(TOTAL_BITCOIN_SUPPLY_SATOSHIS, 21_000_000 * 100_000_000);
+               assert!(MAX_FUNDING_SATOSHIS_NO_WUMBO <= TOTAL_BITCOIN_SUPPLY_SATOSHIS,
+                       "MAX_FUNDING_SATOSHIS_NO_WUMBO is greater than all satoshis in existence");
        }
 
        #[test]
@@ -6403,7 +6469,7 @@ mod tests {
        fn upfront_shutdown_script_incompatibility() {
                let features = InitFeatures::known().clear_shutdown_anysegwit();
                let non_v0_segwit_shutdown_script =
-                       ShutdownScript::new_witness_program(NonZeroU8::new(16).unwrap(), &[0, 40]).unwrap();
+                       ShutdownScript::new_witness_program(WitnessVersion::V16, &[0, 40]).unwrap();
 
                let seed = [42; 32];
                let network = Network::Testnet;
@@ -6633,6 +6699,79 @@ mod tests {
                }
        }
 
+       #[test]
+       fn test_configured_holder_max_htlc_value_in_flight() {
+               let feeest = TestFeeEstimator{fee_est: 15000};
+               let logger = test_utils::TestLogger::new();
+               let secp_ctx = Secp256k1::new();
+               let seed = [42; 32];
+               let network = Network::Testnet;
+               let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
+               let outbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
+               let inbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
+
+               let mut config_2_percent = UserConfig::default();
+               config_2_percent.own_channel_config.max_inbound_htlc_value_in_flight_percent_of_channel = 2;
+               let mut config_99_percent = UserConfig::default();
+               config_99_percent.own_channel_config.max_inbound_htlc_value_in_flight_percent_of_channel = 99;
+               let mut config_0_percent = UserConfig::default();
+               config_0_percent.own_channel_config.max_inbound_htlc_value_in_flight_percent_of_channel = 0;
+               let mut config_101_percent = UserConfig::default();
+               config_101_percent.own_channel_config.max_inbound_htlc_value_in_flight_percent_of_channel = 101;
+
+               // Test that `new_outbound` creates a channel with the correct value for
+               // `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value,
+               // which is set to the lower bound + 1 (2%) of the `channel_value`.
+               let chan_1 = Channel::<EnforcingSigner>::new_outbound(&&feeest, &&keys_provider, outbound_node_id, &InitFeatures::known(), 10000000, 100000, 42, &config_2_percent, 0, 42).unwrap();
+               let chan_1_value_msat = chan_1.channel_value_satoshis * 1000;
+               assert_eq!(chan_1.holder_max_htlc_value_in_flight_msat, (chan_1_value_msat as f64 * 0.02) as u64);
+
+               // Test with the upper bound - 1 of valid values (99%).
+               let chan_2 = Channel::<EnforcingSigner>::new_outbound(&&feeest, &&keys_provider, outbound_node_id, &InitFeatures::known(), 10000000, 100000, 42, &config_99_percent, 0, 42).unwrap();
+               let chan_2_value_msat = chan_2.channel_value_satoshis * 1000;
+               assert_eq!(chan_2.holder_max_htlc_value_in_flight_msat, (chan_2_value_msat as f64 * 0.99) as u64);
+
+               let chan_1_open_channel_msg = chan_1.get_open_channel(genesis_block(network).header.block_hash());
+
+               // Test that `new_from_req` creates a channel with the correct value for
+               // `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value,
+               // which is set to the lower bound + 1 (2%) of the `channel_value`.
+               let chan_3 = Channel::<EnforcingSigner>::new_from_req(&&feeest, &&keys_provider, inbound_node_id, &InitFeatures::known(), &chan_1_open_channel_msg, 7, &config_2_percent, 0, &&logger, 42).unwrap();
+               let chan_3_value_msat = chan_3.channel_value_satoshis * 1000;
+               assert_eq!(chan_3.holder_max_htlc_value_in_flight_msat, (chan_3_value_msat as f64 * 0.02) as u64);
+
+               // Test with the upper bound - 1 of valid values (99%).
+               let chan_4 = Channel::<EnforcingSigner>::new_from_req(&&feeest, &&keys_provider, inbound_node_id, &InitFeatures::known(), &chan_1_open_channel_msg, 7, &config_99_percent, 0, &&logger, 42).unwrap();
+               let chan_4_value_msat = chan_4.channel_value_satoshis * 1000;
+               assert_eq!(chan_4.holder_max_htlc_value_in_flight_msat, (chan_4_value_msat as f64 * 0.99) as u64);
+
+               // Test that `new_outbound` uses the lower bound of the configurable percentage values (1%)
+               // if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value less than 1.
+               let chan_5 = Channel::<EnforcingSigner>::new_outbound(&&feeest, &&keys_provider, outbound_node_id, &InitFeatures::known(), 10000000, 100000, 42, &config_0_percent, 0, 42).unwrap();
+               let chan_5_value_msat = chan_5.channel_value_satoshis * 1000;
+               assert_eq!(chan_5.holder_max_htlc_value_in_flight_msat, (chan_5_value_msat as f64 * 0.01) as u64);
+
+               // Test that `new_outbound` uses the upper bound of the configurable percentage values
+               // (100%) if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a larger value
+               // than 100.
+               let chan_6 = Channel::<EnforcingSigner>::new_outbound(&&feeest, &&keys_provider, outbound_node_id, &InitFeatures::known(), 10000000, 100000, 42, &config_101_percent, 0, 42).unwrap();
+               let chan_6_value_msat = chan_6.channel_value_satoshis * 1000;
+               assert_eq!(chan_6.holder_max_htlc_value_in_flight_msat, chan_6_value_msat);
+
+               // Test that `new_from_req` uses the lower bound of the configurable percentage values (1%)
+               // if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value less than 1.
+               let chan_7 = Channel::<EnforcingSigner>::new_from_req(&&feeest, &&keys_provider, inbound_node_id, &InitFeatures::known(), &chan_1_open_channel_msg, 7, &config_0_percent, 0, &&logger, 42).unwrap();
+               let chan_7_value_msat = chan_7.channel_value_satoshis * 1000;
+               assert_eq!(chan_7.holder_max_htlc_value_in_flight_msat, (chan_7_value_msat as f64 * 0.01) as u64);
+
+               // Test that `new_from_req` uses the upper bound of the configurable percentage values
+               // (100%) if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a larger value
+               // than 100.
+               let chan_8 = Channel::<EnforcingSigner>::new_from_req(&&feeest, &&keys_provider, inbound_node_id, &InitFeatures::known(), &chan_1_open_channel_msg, 7, &config_101_percent, 0, &&logger, 42).unwrap();
+               let chan_8_value_msat = chan_8.channel_value_satoshis * 1000;
+               assert_eq!(chan_8.holder_max_htlc_value_in_flight_msat, chan_8_value_msat);
+       }
+
        #[test]
        fn channel_update() {
                let feeest = TestFeeEstimator{fee_est: 15000};
@@ -6684,9 +6823,9 @@ mod tests {
        #[cfg(not(feature = "grind_signatures"))]
        #[test]
        fn outbound_commitment_test() {
-               use bitcoin::util::bip143;
+               use bitcoin::util::sighash;
                use bitcoin::consensus::encode::serialize;
-               use bitcoin::blockdata::transaction::SigHashType;
+               use bitcoin::blockdata::transaction::EcdsaSighashType;
                use bitcoin::hashes::hex::FromHex;
                use bitcoin::hash_types::Txid;
                use bitcoin::secp256k1::Message;
@@ -6795,7 +6934,7 @@ mod tests {
                                let counterparty_signature = Signature::from_der(&hex::decode($counterparty_sig_hex).unwrap()[..]).unwrap();
                                let sighash = unsigned_tx.get_sighash_all(&redeemscript, chan.channel_value_satoshis);
                                log_trace!(logger, "unsigned_tx = {}", hex::encode(serialize(&unsigned_tx.transaction)));
-                               assert!(secp_ctx.verify(&sighash, &counterparty_signature, chan.counterparty_funding_pubkey()).is_ok(), "verify counterparty commitment sig");
+                               assert!(secp_ctx.verify_ecdsa(&sighash, &counterparty_signature, chan.counterparty_funding_pubkey()).is_ok(), "verify counterparty commitment sig");
 
                                let mut per_htlc: Vec<(HTLCOutputInCommitment, Option<Signature>)> = Vec::new();
                                per_htlc.clear(); // Don't warn about excess mut for no-HTLC calls
@@ -6834,9 +6973,9 @@ mod tests {
                                                chan.get_counterparty_selected_contest_delay().unwrap(),
                                                &htlc, $opt_anchors, &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
                                        let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, $opt_anchors, &keys);
-                                       let htlc_sighashtype = if $opt_anchors { SigHashType::SinglePlusAnyoneCanPay } else { SigHashType::All };
-                                       let htlc_sighash = Message::from_slice(&bip143::SigHashCache::new(&htlc_tx).signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype)[..]).unwrap();
-                                       assert!(secp_ctx.verify(&htlc_sighash, &remote_signature, &keys.countersignatory_htlc_key).is_ok(), "verify counterparty htlc sig");
+                                       let htlc_sighashtype = if $opt_anchors { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
+                                       let htlc_sighash = Message::from_slice(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]).unwrap();
+                                       assert!(secp_ctx.verify_ecdsa(&htlc_sighash, &remote_signature, &keys.countersignatory_htlc_key).is_ok(), "verify counterparty htlc sig");
 
                                        let mut preimage: Option<PaymentPreimage> = None;
                                        if !htlc.offered {
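
The sighash and signature-verification hunks above track the rust-bitcoin and rust-secp256k1 renames (bip143::SigHashCache -> sighash::SighashCache, SigHashType -> EcdsaSighashType, verify -> verify_ecdsa). A minimal stand-alone sketch of the updated flow, with a hypothetical helper name:

    use bitcoin::blockdata::script::Script;
    use bitcoin::blockdata::transaction::{EcdsaSighashType, Transaction};
    use bitcoin::secp256k1::{ecdsa::Signature, Message, PublicKey, Secp256k1};
    use bitcoin::util::sighash::SighashCache;

    // Verify a signature over a segwit input the same way the test macro above now does.
    fn verify_segwit_input_sig(tx: &Transaction, script_code: &Script, value_sat: u64,
        sig: &Signature, pubkey: &PublicKey) -> bool
    {
        let sighash = match SighashCache::new(tx)
            .segwit_signature_hash(0, script_code, value_sat, EcdsaSighashType::All)
        {
            Ok(hash) => hash,
            Err(_) => return false,
        };
        let msg = Message::from_slice(&sighash[..]).expect("sighash is 32 bytes");
        Secp256k1::verification_only().verify_ecdsa(&msg, sig, pubkey).is_ok()
    }
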
index fb3f71ee60a2bc493a821709e804ba9ff4c593b2..ac9e26b3f0d5498cda4e370212d85c25c79f6a6b 100644 (file)
@@ -18,7 +18,7 @@
 //! imply it needs to fail HTLCs/payments/channels it manages).
 //!
 
-use bitcoin::blockdata::block::{Block, BlockHeader};
+use bitcoin::blockdata::block::BlockHeader;
 use bitcoin::blockdata::transaction::Transaction;
 use bitcoin::blockdata::constants::genesis_block;
 use bitcoin::network::constants::Network;
@@ -28,7 +28,7 @@ use bitcoin::hashes::sha256::Hash as Sha256;
 use bitcoin::hashes::sha256d::Hash as Sha256dHash;
 use bitcoin::hash_types::{BlockHash, Txid};
 
-use bitcoin::secp256k1::key::{SecretKey,PublicKey};
+use bitcoin::secp256k1::{SecretKey,PublicKey};
 use bitcoin::secp256k1::Secp256k1;
 use bitcoin::secp256k1::ecdh::SharedSecret;
 use bitcoin::secp256k1;
@@ -48,6 +48,7 @@ use ln::msgs;
 use ln::msgs::NetAddress;
 use ln::onion_utils;
 use ln::msgs::{ChannelMessageHandler, DecodeError, LightningError, MAX_VALUE_MSAT, OptionalField};
+use ln::wire::Encode;
 use chain::keysinterface::{Sign, KeysInterface, KeysManager, InMemorySigner, Recipient};
 use util::config::UserConfig;
 use util::events::{EventHandler, EventsProvider, MessageSendEvent, MessageSendEventsProvider, ClosureReason};
@@ -159,20 +160,26 @@ pub(crate) struct HTLCPreviousHopData {
 }
 
 enum OnionPayload {
-       /// Contains a total_msat (which may differ from value if this is a Multi-Path Payment) and a
-       /// payment_secret which prevents path-probing attacks and can associate different HTLCs which
-       /// are part of the same payment.
-       Invoice(msgs::FinalOnionHopData),
+       /// Indicates this incoming onion payload is for the purpose of paying an invoice.
+       Invoice {
+       /// This is only here for backwards-compatibility in serialization; in the future it can be
+       /// removed, breaking clients running 0.0.106 and earlier.
+               _legacy_hop_data: msgs::FinalOnionHopData,
+       },
        /// Contains the payer-provided preimage.
        Spontaneous(PaymentPreimage),
 }
 
+/// HTLCs that are to us and can be failed/claimed by the user
 struct ClaimableHTLC {
        prev_hop: HTLCPreviousHopData,
        cltv_expiry: u32,
+       /// The amount (in msats) of this MPP part
        value: u64,
        onion_payload: OnionPayload,
        timer_ticks: u8,
+       /// The sum total of all MPP parts
+       total_msat: u64,
 }
 
 /// A payment identifier used to uniquely identify a payment to LDK.
@@ -1005,6 +1012,13 @@ pub struct ChannelDetails {
        /// conflict-avoidance policy, exactly this amount is not likely to be spendable. However, we
        /// should be able to spend nearly this amount.
        pub outbound_capacity_msat: u64,
+       /// The available outbound capacity for sending a single HTLC to the remote peer. This is
+       /// similar to [`ChannelDetails::outbound_capacity_msat`] but it may be further restricted by
+       /// the current state and per-HTLC limit(s). This is intended for use when routing, allowing us
+       /// to use a limit as close as possible to the HTLC limit we can currently send.
+       ///
+       /// See also [`ChannelDetails::balance_msat`] and [`ChannelDetails::outbound_capacity_msat`].
+       pub next_outbound_htlc_limit_msat: u64,
        /// The available inbound capacity for the remote peer to send HTLCs to us. This does not
        /// include any pending HTLCs which are not yet fully resolved (and, thus, whose balance is not
        /// available for inclusion in new inbound HTLCs).
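
As the documentation above notes, this field is the per-HTLC bound a router should respect rather than the total capacity. A hedged sketch of one way a caller might use it when sizing a payment part (helper name hypothetical):

    use lightning::ln::channelmanager::ChannelDetails;

    // The largest single HTLC worth attempting over this channel right now.
    fn max_single_htlc_msat(chan: &ChannelDetails) -> u64 {
        chan.next_outbound_htlc_limit_msat.min(chan.outbound_capacity_msat)
    }
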
@@ -1509,8 +1523,6 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
        ///
        /// Non-proportional fees are fixed according to our risk using the provided fee estimator.
        ///
-       /// panics if channel_value_satoshis is >= `MAX_FUNDING_SATOSHIS`!
-       ///
        /// Users need to notify the new ChannelManager when a new block is connected or
        /// disconnected using its `block_connected` and `block_disconnected` methods, starting
        /// from after `params.latest_hash`.
@@ -1670,8 +1682,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                        let channel_state = self.channel_state.lock().unwrap();
                        res.reserve(channel_state.by_id.len());
                        for (channel_id, channel) in channel_state.by_id.iter().filter(f) {
-                               let (inbound_capacity_msat, outbound_capacity_msat) = channel.get_inbound_outbound_available_balance_msat();
-                               let balance_msat = channel.get_balance_msat();
+                               let balance = channel.get_available_balances();
                                let (to_remote_reserve_satoshis, to_self_reserve_satoshis) =
                                        channel.get_holder_counterparty_selected_channel_reserve_satoshis();
                                res.push(ChannelDetails {
@@ -1698,9 +1709,10 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                        inbound_scid_alias: channel.latest_inbound_scid_alias(),
                                        channel_value_satoshis: channel.get_value_satoshis(),
                                        unspendable_punishment_reserve: to_self_reserve_satoshis,
-                                       balance_msat,
-                                       inbound_capacity_msat,
-                                       outbound_capacity_msat,
+                                       balance_msat: balance.balance_msat,
+                                       inbound_capacity_msat: balance.inbound_capacity_msat,
+                                       outbound_capacity_msat: balance.outbound_capacity_msat,
+                                       next_outbound_htlc_limit_msat: balance.next_outbound_htlc_limit_msat,
                                        user_channel_id: channel.get_user_id(),
                                        confirmations_required: channel.minimum_depth(),
                                        force_close_spend_delay: channel.get_counterparty_selected_contest_delay(),
@@ -2059,11 +2071,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                        return_malformed_err!("invalid ephemeral pubkey", 0x8000 | 0x4000 | 6);
                }
 
-               let shared_secret = {
-                       let mut arr = [0; 32];
-                       arr.copy_from_slice(&SharedSecret::new(&msg.onion_routing_packet.public_key.unwrap(), &self.our_network_key)[..]);
-                       arr
-               };
+               let shared_secret = SharedSecret::new(&msg.onion_routing_packet.public_key.unwrap(), &self.our_network_key).secret_bytes();
 
                if msg.onion_routing_packet.version != 0 {
                        //TODO: Spec doesn't indicate if we should only hash hop_data here (and in other
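
The change above swaps the manual array copy of the ECDH output for the secret_bytes() accessor from secp256k1 0.22. A minimal sketch of the call (helper name hypothetical):

    use bitcoin::secp256k1::ecdh::SharedSecret;
    use bitcoin::secp256k1::{PublicKey, SecretKey};

    // secret_bytes() hands back the 32-byte shared secret directly, so no
    // intermediate buffer and copy_from_slice are needed.
    fn ecdh32(their_pubkey: &PublicKey, our_key: &SecretKey) -> [u8; 32] {
        SharedSecret::new(their_pubkey, our_key).secret_bytes()
    }
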
@@ -2242,7 +2250,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                        break None;
                                }
                                {
-                                       let mut res = VecWriter(Vec::with_capacity(chan_update.serialized_length() + 8 + 2));
+                                       let mut res = VecWriter(Vec::with_capacity(chan_update.serialized_length() + 2 + 8 + 2));
                                        if let Some(chan_update) = chan_update {
                                                if code == 0x1000 | 11 || code == 0x1000 | 12 {
                                                        msg.amount_msat.write(&mut res).expect("Writes cannot fail");
@@ -2254,7 +2262,8 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                        // TODO: underspecified, follow https://github.com/lightningnetwork/lightning-rfc/issues/791
                                                        0u16.write(&mut res).expect("Writes cannot fail");
                                                }
-                                               (chan_update.serialized_length() as u16).write(&mut res).expect("Writes cannot fail");
+                                               (chan_update.serialized_length() as u16 + 2).write(&mut res).expect("Writes cannot fail");
+                                               msgs::ChannelUpdate::TYPE.write(&mut res).expect("Writes cannot fail");
                                                chan_update.write(&mut res).expect("Writes cannot fail");
                                        }
                                        return_err!(err, code, &res.0[..]);
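
This hunk (and the matching one in get_htlc_temp_fail_err_and_data further down) now prefixes the enclosed channel_update with its 2-byte message type and counts those bytes in the length, as BOLT 4 failure messages expect. A stand-alone sketch of the resulting data for error code 0x1000|11 (amount_below_minimum), assuming big-endian integer encoding as in LDK's writer and a hypothetical function name:

    // Layout: [amt_msat: u64][len: u16][type: u16 (258 = channel_update)][channel_update bytes]
    fn amount_below_minimum_data(amt_msat: u64, channel_update: &[u8]) -> Vec<u8> {
        let mut data = Vec::with_capacity(8 + 2 + 2 + channel_update.len());
        data.extend_from_slice(&amt_msat.to_be_bytes());
        // The length field now covers the 2-byte message type plus the update itself.
        data.extend_from_slice(&(channel_update.len() as u16 + 2).to_be_bytes());
        data.extend_from_slice(&258u16.to_be_bytes()); // msgs::ChannelUpdate::TYPE
        data.extend_from_slice(channel_update);
        data
    }
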
@@ -2313,7 +2322,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                };
 
                let msg_hash = Sha256dHash::hash(&unsigned.encode()[..]);
-               let sig = self.secp_ctx.sign(&hash_to_message!(&msg_hash[..]), &self.our_network_key);
+               let sig = self.secp_ctx.sign_ecdsa(&hash_to_message!(&msg_hash[..]), &self.our_network_key);
 
                Ok(msgs::ChannelUpdate {
                        signature: sig,
@@ -2914,11 +2923,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                                                        if let PendingHTLCRouting::Forward { onion_packet, .. } = routing {
                                                                                                let phantom_secret_res = self.keys_manager.get_node_secret(Recipient::PhantomNode);
                                                                                                if phantom_secret_res.is_ok() && fake_scid::is_valid_phantom(&self.fake_scid_rand_bytes, short_chan_id) {
-                                                                                                       let phantom_shared_secret = {
-                                                                                                               let mut arr = [0; 32];
-                                                                                                               arr.copy_from_slice(&SharedSecret::new(&onion_packet.public_key.unwrap(), &phantom_secret_res.unwrap())[..]);
-                                                                                                               arr
-                                                                                                       };
+                                                                                                       let phantom_shared_secret = SharedSecret::new(&onion_packet.public_key.unwrap(), &phantom_secret_res.unwrap()).secret_bytes();
                                                                                                        let next_hop = match onion_utils::decode_next_hop(phantom_shared_secret, &onion_packet.hop_data, onion_packet.hmac, payment_hash) {
                                                                                                                Ok(res) => res,
                                                                                                                Err(onion_utils::OnionDecodeErr::Malformed { err_msg, err_code }) => {
@@ -3091,11 +3096,13 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                        HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_htlc_id, forward_info: PendingHTLCInfo {
                                                                        routing, incoming_shared_secret, payment_hash, amt_to_forward, .. },
                                                                        prev_funding_outpoint } => {
-                                                               let (cltv_expiry, onion_payload, phantom_shared_secret) = match routing {
-                                                                       PendingHTLCRouting::Receive { payment_data, incoming_cltv_expiry, phantom_shared_secret } =>
-                                                                               (incoming_cltv_expiry, OnionPayload::Invoice(payment_data), phantom_shared_secret),
+                                                               let (cltv_expiry, onion_payload, payment_data, phantom_shared_secret) = match routing {
+                                                                       PendingHTLCRouting::Receive { payment_data, incoming_cltv_expiry, phantom_shared_secret } => {
+                                                                               let _legacy_hop_data = payment_data.clone();
+                                                                               (incoming_cltv_expiry, OnionPayload::Invoice { _legacy_hop_data }, Some(payment_data), phantom_shared_secret)
+                                                                       },
                                                                        PendingHTLCRouting::ReceiveKeysend { payment_preimage, incoming_cltv_expiry } =>
-                                                                               (incoming_cltv_expiry, OnionPayload::Spontaneous(payment_preimage), None),
+                                                                               (incoming_cltv_expiry, OnionPayload::Spontaneous(payment_preimage), None, None),
                                                                        _ => {
                                                                                panic!("short_channel_id == 0 should imply any pending_forward entries are of type Receive");
                                                                        }
@@ -3110,6 +3117,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                                        },
                                                                        value: amt_to_forward,
                                                                        timer_ticks: 0,
+                                                                       total_msat: if let Some(data) = &payment_data { data.total_msat } else { amt_to_forward },
                                                                        cltv_expiry,
                                                                        onion_payload,
                                                                };
@@ -3133,7 +3141,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                                }
 
                                                                macro_rules! check_total_value {
-                                                                       ($payment_data_total_msat: expr, $payment_secret: expr, $payment_preimage: expr) => {{
+                                                                       ($payment_data: expr, $payment_preimage: expr) => {{
                                                                                let mut payment_received_generated = false;
                                                                                let htlcs = channel_state.claimable_htlcs.entry(payment_hash)
                                                                                        .or_insert(Vec::new());
@@ -3148,10 +3156,10 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                                                for htlc in htlcs.iter() {
                                                                                        total_value += htlc.value;
                                                                                        match &htlc.onion_payload {
-                                                                                               OnionPayload::Invoice(htlc_payment_data) => {
-                                                                                                       if htlc_payment_data.total_msat != $payment_data_total_msat {
+                                                                                               OnionPayload::Invoice { .. } => {
+                                                                                                       if htlc.total_msat != $payment_data.total_msat {
                                                                                                                log_trace!(self.logger, "Failing HTLCs with payment_hash {} as the HTLCs had inconsistent total values (eg {} and {})",
-                                                                                                                       log_bytes!(payment_hash.0), $payment_data_total_msat, htlc_payment_data.total_msat);
+                                                                                                                       log_bytes!(payment_hash.0), $payment_data.total_msat, htlc.total_msat);
                                                                                                                total_value = msgs::MAX_VALUE_MSAT;
                                                                                                        }
                                                                                                        if total_value >= msgs::MAX_VALUE_MSAT { break; }
@@ -3159,17 +3167,17 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                                                                _ => unreachable!(),
                                                                                        }
                                                                                }
-                                                                               if total_value >= msgs::MAX_VALUE_MSAT || total_value > $payment_data_total_msat {
+                                                                               if total_value >= msgs::MAX_VALUE_MSAT || total_value > $payment_data.total_msat {
                                                                                        log_trace!(self.logger, "Failing HTLCs with payment_hash {} as the total value {} ran over expected value {} (or HTLCs were inconsistent)",
-                                                                                               log_bytes!(payment_hash.0), total_value, $payment_data_total_msat);
+                                                                                               log_bytes!(payment_hash.0), total_value, $payment_data.total_msat);
                                                                                        fail_htlc!(claimable_htlc);
-                                                                               } else if total_value == $payment_data_total_msat {
+                                                                               } else if total_value == $payment_data.total_msat {
                                                                                        htlcs.push(claimable_htlc);
                                                                                        new_events.push(events::Event::PaymentReceived {
                                                                                                payment_hash,
                                                                                                purpose: events::PaymentPurpose::InvoicePayment {
                                                                                                        payment_preimage: $payment_preimage,
-                                                                                                       payment_secret: $payment_secret,
+                                                                                                       payment_secret: $payment_data.payment_secret,
                                                                                                },
                                                                                                amt: total_value,
                                                                                        });
@@ -3194,17 +3202,16 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                                match payment_secrets.entry(payment_hash) {
                                                                        hash_map::Entry::Vacant(_) => {
                                                                                match claimable_htlc.onion_payload {
-                                                                                       OnionPayload::Invoice(ref payment_data) => {
-                                                                                               let payment_preimage = match inbound_payment::verify(payment_hash, payment_data.clone(), self.highest_seen_timestamp.load(Ordering::Acquire) as u64, &self.inbound_payment_key, &self.logger) {
+                                                                                       OnionPayload::Invoice { .. } => {
+                                                                                               let payment_data = payment_data.unwrap();
+                                                                                               let payment_preimage = match inbound_payment::verify(payment_hash, &payment_data, self.highest_seen_timestamp.load(Ordering::Acquire) as u64, &self.inbound_payment_key, &self.logger) {
                                                                                                        Ok(payment_preimage) => payment_preimage,
                                                                                                        Err(()) => {
                                                                                                                fail_htlc!(claimable_htlc);
                                                                                                                continue
                                                                                                        }
                                                                                                };
-                                                                                               let payment_data_total_msat = payment_data.total_msat;
-                                                                                               let payment_secret = payment_data.payment_secret.clone();
-                                                                                               check_total_value!(payment_data_total_msat, payment_secret, payment_preimage);
+                                                                                               check_total_value!(payment_data, payment_preimage);
                                                                                        },
                                                                                        OnionPayload::Spontaneous(preimage) => {
                                                                                                match channel_state.claimable_htlcs.entry(payment_hash) {
@@ -3225,14 +3232,12 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                                                }
                                                                        },
                                                                        hash_map::Entry::Occupied(inbound_payment) => {
-                                                                               let payment_data =
-                                                                                       if let OnionPayload::Invoice(ref data) = claimable_htlc.onion_payload {
-                                                                                               data.clone()
-                                                                                       } else {
-                                                                                               log_trace!(self.logger, "Failing new keysend HTLC with payment_hash {} because we already have an inbound payment with the same payment hash", log_bytes!(payment_hash.0));
-                                                                                               fail_htlc!(claimable_htlc);
-                                                                                               continue
-                                                                                       };
+                                                                               if payment_data.is_none() {
+                                                                                       log_trace!(self.logger, "Failing new keysend HTLC with payment_hash {} because we already have an inbound payment with the same payment hash", log_bytes!(payment_hash.0));
+                                                                                       fail_htlc!(claimable_htlc);
+                                                                                       continue
+                                                                               };
+                                                                               let payment_data = payment_data.unwrap();
                                                                                if inbound_payment.get().payment_secret != payment_data.payment_secret {
                                                                                        log_trace!(self.logger, "Failing new HTLC with payment_hash {} as it didn't match our expected payment secret.", log_bytes!(payment_hash.0));
                                                                                        fail_htlc!(claimable_htlc);
@@ -3241,7 +3246,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                                                                log_bytes!(payment_hash.0), payment_data.total_msat, inbound_payment.get().min_value_msat.unwrap());
                                                                                        fail_htlc!(claimable_htlc);
                                                                                } else {
-                                                                                       let payment_received_generated = check_total_value!(payment_data.total_msat, payment_data.payment_secret, inbound_payment.get().payment_preimage);
+                                                                                       let payment_received_generated = check_total_value!(payment_data, inbound_payment.get().payment_preimage);
                                                                                        if payment_received_generated {
                                                                                                inbound_payment.remove_entry();
                                                                                        }
@@ -3460,10 +3465,10 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                debug_assert!(false);
                                                return false;
                                        }
-                                       if let OnionPayload::Invoice(ref final_hop_data) = htlcs[0].onion_payload {
+                                       if let OnionPayload::Invoice { .. } = htlcs[0].onion_payload {
                                                // Check if we've received all the parts we need for an MPP (the value of the parts adds to total_msat).
                                                // In this case we're not going to handle any timeouts of the parts here.
-                                               if final_hop_data.total_msat == htlcs.iter().fold(0, |total, htlc| total + htlc.value) {
+                                               if htlcs[0].total_msat == htlcs.iter().fold(0, |total, htlc| total + htlc.value) {
                                                        return true;
                                                } else if htlcs.into_iter().any(|htlc| {
                                                        htlc.timer_ticks += 1;
@@ -3540,12 +3545,13 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
        fn get_htlc_temp_fail_err_and_data(&self, desired_err_code: u16, scid: u64, chan: &Channel<Signer>) -> (u16, Vec<u8>) {
                debug_assert_eq!(desired_err_code & 0x1000, 0x1000);
                if let Ok(upd) = self.get_channel_update_for_onion(scid, chan) {
-                       let mut enc = VecWriter(Vec::with_capacity(upd.serialized_length() + 4));
+                       let mut enc = VecWriter(Vec::with_capacity(upd.serialized_length() + 6));
                        if desired_err_code == 0x1000 | 20 {
                                // TODO: underspecified, follow https://github.com/lightning/bolts/issues/791
                                0u16.write(&mut enc).expect("Writes cannot fail");
                        }
-                       (upd.serialized_length() as u16).write(&mut enc).expect("Writes cannot fail");
+                       (upd.serialized_length() as u16 + 2).write(&mut enc).expect("Writes cannot fail");
+                       msgs::ChannelUpdate::TYPE.write(&mut enc).expect("Writes cannot fail");
                        upd.write(&mut enc).expect("Writes cannot fail");
                        (desired_err_code, enc.0)
                } else {
@@ -5307,18 +5313,17 @@ where
        F::Target: FeeEstimator,
        L::Target: Logger,
 {
-       fn block_connected(&self, block: &Block, height: u32) {
+       fn filtered_block_connected(&self, header: &BlockHeader, txdata: &TransactionData, height: u32) {
                {
                        let best_block = self.best_block.read().unwrap();
-                       assert_eq!(best_block.block_hash(), block.header.prev_blockhash,
+                       assert_eq!(best_block.block_hash(), header.prev_blockhash,
                                "Blocks must be connected in chain-order - the connected header must build on the last connected header");
                        assert_eq!(best_block.height(), height - 1,
                                "Blocks must be connected in chain-order - the connected block height must be one greater than the previous height");
                }
 
-               let txdata: Vec<_> = block.txdata.iter().enumerate().collect();
-               self.transactions_confirmed(&block.header, &txdata, height);
-               self.best_block_updated(&block.header, height);
+               self.transactions_confirmed(header, txdata, height);
+               self.best_block_updated(header, height);
        }
 
        fn block_disconnected(&self, header: &BlockHeader, height: u32) {
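
The Listen implementation above now receives pre-filtered transaction data instead of a whole Block; a caller that still has full blocks can rebuild the old input by enumerating the block's transactions, roughly as the removed lines did. A minimal sketch:

    use bitcoin::blockdata::block::Block;
    use bitcoin::blockdata::transaction::Transaction;

    // Turn a full block into the (index, tx) pairs expected by filtered_block_connected.
    fn block_to_txdata(block: &Block) -> Vec<(usize, &Transaction)> {
        block.txdata.iter().enumerate().collect()
    }
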
@@ -5697,39 +5702,21 @@ impl<Signer: Sign, M: Deref , T: Deref , K: Deref , F: Deref , L: Deref >
                        let channel_state = &mut *channel_state_lock;
                        let pending_msg_events = &mut channel_state.pending_msg_events;
                        let short_to_id = &mut channel_state.short_to_id;
-                       if no_connection_possible {
-                               log_debug!(self.logger, "Failing all channels with {} due to no_connection_possible", log_pubkey!(counterparty_node_id));
-                               channel_state.by_id.retain(|_, chan| {
-                                       if chan.get_counterparty_node_id() == *counterparty_node_id {
+                       log_debug!(self.logger, "Marking channels with {} disconnected and generating channel_updates. We believe we {} make future connections to this peer.",
+                               log_pubkey!(counterparty_node_id), if no_connection_possible { "cannot" } else { "can" });
+                       channel_state.by_id.retain(|_, chan| {
+                               if chan.get_counterparty_node_id() == *counterparty_node_id {
+                                       chan.remove_uncommitted_htlcs_and_mark_paused(&self.logger);
+                                       if chan.is_shutdown() {
                                                update_maps_on_chan_removal!(self, short_to_id, chan);
-                                               failed_channels.push(chan.force_shutdown(true));
-                                               if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
-                                                       pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
-                                                               msg: update
-                                                       });
-                                               }
                                                self.issue_channel_close_events(chan, ClosureReason::DisconnectedPeer);
-                                               false
+                                               return false;
                                        } else {
-                                               true
+                                               no_channels_remain = false;
                                        }
-                               });
-                       } else {
-                               log_debug!(self.logger, "Marking channels with {} disconnected and generating channel_updates", log_pubkey!(counterparty_node_id));
-                               channel_state.by_id.retain(|_, chan| {
-                                       if chan.get_counterparty_node_id() == *counterparty_node_id {
-                                               chan.remove_uncommitted_htlcs_and_mark_paused(&self.logger);
-                                               if chan.is_shutdown() {
-                                                       update_maps_on_chan_removal!(self, short_to_id, chan);
-                                                       self.issue_channel_close_events(chan, ClosureReason::DisconnectedPeer);
-                                                       return false;
-                                               } else {
-                                                       no_channels_remain = false;
-                                               }
-                                       }
-                                       true
-                               })
-                       }
+                               }
+                               true
+                       });
                        pending_msg_events.retain(|msg| {
                                match msg {
                                        &events::MessageSendEvent::SendAcceptChannel { ref node_id, .. } => node_id != counterparty_node_id,
@@ -5941,6 +5928,9 @@ impl_writeable_tlv_based!(ChannelDetails, {
        (14, user_channel_id, required),
        (16, balance_msat, required),
        (18, outbound_capacity_msat, required),
+       // Note that by the time we get past the required read above, outbound_capacity_msat will be
+       // filled in, so we can safely unwrap it here.
+       (19, next_outbound_htlc_limit_msat, (default_value, outbound_capacity_msat.0.unwrap())),
        (20, inbound_capacity_msat, required),
        (22, confirmations_required, option),
        (24, force_close_spend_delay, option),
@@ -6066,20 +6056,21 @@ impl_writeable_tlv_based!(HTLCPreviousHopData, {
 impl Writeable for ClaimableHTLC {
        fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
                let payment_data = match &self.onion_payload {
-                       OnionPayload::Invoice(data) => Some(data.clone()),
+                       OnionPayload::Invoice { _legacy_hop_data } => Some(_legacy_hop_data),
                        _ => None,
                };
                let keysend_preimage = match self.onion_payload {
-                       OnionPayload::Invoice(_) => None,
+                       OnionPayload::Invoice { .. } => None,
                        OnionPayload::Spontaneous(preimage) => Some(preimage.clone()),
                };
-               write_tlv_fields!
-               (writer,
-                {
-                  (0, self.prev_hop, required), (2, self.value, required),
-                  (4, payment_data, option), (6, self.cltv_expiry, required),
-                        (8, keysend_preimage, option),
-                });
+               write_tlv_fields!(writer, {
+                       (0, self.prev_hop, required),
+                       (1, self.total_msat, required),
+                       (2, self.value, required),
+                       (4, payment_data, option),
+                       (6, self.cltv_expiry, required),
+                       (8, keysend_preimage, option),
+               });
                Ok(())
        }
 }
@@ -6090,32 +6081,41 @@ impl Readable for ClaimableHTLC {
                let mut value = 0;
                let mut payment_data: Option<msgs::FinalOnionHopData> = None;
                let mut cltv_expiry = 0;
+               let mut total_msat = None;
                let mut keysend_preimage: Option<PaymentPreimage> = None;
-               read_tlv_fields!
-               (reader,
-                {
-                  (0, prev_hop, required), (2, value, required),
-                  (4, payment_data, option), (6, cltv_expiry, required),
-                        (8, keysend_preimage, option)
-                });
+               read_tlv_fields!(reader, {
+                       (0, prev_hop, required),
+                       (1, total_msat, option),
+                       (2, value, required),
+                       (4, payment_data, option),
+                       (6, cltv_expiry, required),
+                       (8, keysend_preimage, option)
+               });
                let onion_payload = match keysend_preimage {
                        Some(p) => {
                                if payment_data.is_some() {
                                        return Err(DecodeError::InvalidValue)
                                }
+                               if total_msat.is_none() {
+                                       total_msat = Some(value);
+                               }
                                OnionPayload::Spontaneous(p)
                        },
                        None => {
                                if payment_data.is_none() {
                                        return Err(DecodeError::InvalidValue)
                                }
-                               OnionPayload::Invoice(payment_data.unwrap())
+                               if total_msat.is_none() {
+                                       total_msat = Some(payment_data.as_ref().unwrap().total_msat);
+                               }
+                               OnionPayload::Invoice { _legacy_hop_data: payment_data.unwrap() }
                        },
                };
                Ok(Self {
                        prev_hop: prev_hop.0.unwrap(),
                        timer_ticks: 0,
                        value,
+                       total_msat: total_msat.unwrap(),
                        onion_payload,
                        cltv_expiry,
                })
@@ -7316,7 +7316,7 @@ mod tests {
                // payment verification fails as expected.
                let mut bad_payment_hash = payment_hash.clone();
                bad_payment_hash.0[0] += 1;
-               match inbound_payment::verify(bad_payment_hash, payment_data.clone(), nodes[0].node.highest_seen_timestamp.load(Ordering::Acquire) as u64, &nodes[0].node.inbound_payment_key, &nodes[0].logger) {
+               match inbound_payment::verify(bad_payment_hash, &payment_data, nodes[0].node.highest_seen_timestamp.load(Ordering::Acquire) as u64, &nodes[0].node.inbound_payment_key, &nodes[0].logger) {
                        Ok(_) => panic!("Unexpected ok"),
                        Err(()) => {
                                nodes[0].logger.assert_log_contains("lightning::ln::inbound_payment".to_string(), "Failing HTLC with user-generated payment_hash".to_string(), 1);
@@ -7324,7 +7324,7 @@ mod tests {
                }
 
                // Check that using the original payment hash succeeds.
-               assert!(inbound_payment::verify(payment_hash, payment_data, nodes[0].node.highest_seen_timestamp.load(Ordering::Acquire) as u64, &nodes[0].node.inbound_payment_key, &nodes[0].logger).is_ok());
+               assert!(inbound_payment::verify(payment_hash, &payment_data, nodes[0].node.highest_seen_timestamp.load(Ordering::Acquire) as u64, &nodes[0].node.inbound_payment_key, &nodes[0].logger).is_ok());
        }
 }
 
index 4512ee8002996aced4ba6238c6d7a42233608955..2580874640e438e611bea9b88ddda0384dc9db5d 100644 (file)
@@ -137,7 +137,7 @@ mod sealed {
                        // Byte 1
                        ,
                        // Byte 2
-                       BasicMPP,
+                       BasicMPP | Wumbo,
                        // Byte 3
                        ShutdownAnySegwit,
                        // Byte 4
@@ -169,7 +169,7 @@ mod sealed {
                        // Byte 1
                        ,
                        // Byte 2
-                       BasicMPP,
+                       BasicMPP | Wumbo,
                        // Byte 3
                        ShutdownAnySegwit,
                        // Byte 4
@@ -390,6 +390,9 @@ mod sealed {
        define_feature!(17, BasicMPP, [InitContext, NodeContext, InvoiceContext],
                "Feature flags for `basic_mpp`.", set_basic_mpp_optional, set_basic_mpp_required,
                supports_basic_mpp, requires_basic_mpp);
+       define_feature!(19, Wumbo, [InitContext, NodeContext],
+               "Feature flags for `option_support_large_channel` (aka wumbo channels).", set_wumbo_optional, set_wumbo_required,
+               supports_wumbo, requires_wumbo);
        define_feature!(27, ShutdownAnySegwit, [InitContext, NodeContext],
                "Feature flags for `opt_shutdown_anysegwit`.", set_shutdown_any_segwit_optional,
                set_shutdown_any_segwit_required, supports_shutdown_anysegwit, requires_shutdown_anysegwit);
@@ -740,6 +743,15 @@ impl<T: sealed::ShutdownAnySegwit> Features<T> {
                self
        }
 }
+
+impl<T: sealed::Wumbo> Features<T> {
+       #[cfg(test)]
+       pub(crate) fn clear_wumbo(mut self) -> Self {
+               <T as sealed::Wumbo>::clear_bits(&mut self.flags);
+               self
+       }
+}
+
 macro_rules! impl_feature_len_prefixed_write {
        ($features: ident) => {
                impl Writeable for $features {
@@ -843,6 +855,11 @@ mod tests {
                assert!(!InitFeatures::known().requires_scid_privacy());
                assert!(!NodeFeatures::known().requires_scid_privacy());
 
+               assert!(InitFeatures::known().supports_wumbo());
+               assert!(NodeFeatures::known().supports_wumbo());
+               assert!(!InitFeatures::known().requires_wumbo());
+               assert!(!NodeFeatures::known().requires_wumbo());
+
                let mut init_features = InitFeatures::known();
                assert!(init_features.initial_routing_sync());
                init_features.clear_initial_routing_sync();
@@ -878,14 +895,14 @@ mod tests {
                        // Check that the flags are as expected:
                        // - option_data_loss_protect
                        // - var_onion_optin (req) | static_remote_key (req) | payment_secret(req)
-                       // - basic_mpp
+                       // - basic_mpp | wumbo
                        // - opt_shutdown_anysegwit
                        // -
                        // - option_channel_type | option_scid_alias
                        assert_eq!(node_features.flags.len(), 6);
                        assert_eq!(node_features.flags[0], 0b00000010);
                        assert_eq!(node_features.flags[1], 0b01010001);
-                       assert_eq!(node_features.flags[2], 0b00000010);
+                       assert_eq!(node_features.flags[2], 0b00001010);
                        assert_eq!(node_features.flags[3], 0b00001000);
                        assert_eq!(node_features.flags[4], 0b00000000);
                        assert_eq!(node_features.flags[5], 0b10100000);
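
For readers checking the flag-byte assertions above by hand, a feature bit maps to byte index bit / 8 and bit mask 1 << (bit % 8). A minimal illustrative sketch of that arithmetic (the helper name byte_and_mask is hypothetical, not part of this change):

        fn byte_and_mask(feature_bit: usize) -> (usize, u8) {
            // Flags are stored one byte per eight feature bits, least-significant bit first.
            (feature_bit / 8, 1u8 << (feature_bit % 8))
        }

        fn main() {
            // basic_mpp's optional bit 17 contributes 0b00000010 to byte 2 and wumbo's
            // optional bit 19 contributes 0b00001000, together giving the 0b00001010
            // asserted for node_features.flags[2] above.
            assert_eq!(byte_and_mask(17), (2, 0b00000010));
            assert_eq!(byte_and_mask(19), (2, 0b00001000));
        }
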
index dbeb9cc44a33f1840593f77cc59844fe1d153b3d..39950de9afd8aa7359cf8584c9d804675e161411 100644 (file)
@@ -35,7 +35,7 @@ use bitcoin::network::constants::Network;
 
 use bitcoin::hash_types::BlockHash;
 
-use bitcoin::secp256k1::key::PublicKey;
+use bitcoin::secp256k1::PublicKey;
 
 use io;
 use prelude::*;
@@ -859,7 +859,7 @@ macro_rules! check_spends {
                        for output in $tx.output.iter() {
                                total_value_out += output.value;
                        }
-                       let min_fee = ($tx.get_weight() as u64 + 3) / 4; // One sat per vbyte (ie per weight/4, rounded up)
+                       let min_fee = ($tx.weight() as u64 + 3) / 4; // One sat per vbyte (ie per weight/4, rounded up)
                        // Input amount - output amount = fee, so check that out + min_fee is smaller than input
                        assert!(total_value_out + min_fee <= total_value_in);
                        $tx.verify(get_output).unwrap();
@@ -1167,6 +1167,21 @@ macro_rules! get_payment_preimage_hash {
        }
 }
 
+#[macro_export]
+macro_rules! get_route {
+       ($send_node: expr, $payment_params: expr, $recv_value: expr, $cltv: expr) => {{
+               use $crate::chain::keysinterface::KeysInterface;
+               let scorer = $crate::util::test_utils::TestScorer::with_penalty(0);
+               let keys_manager = $crate::util::test_utils::TestKeysInterface::new(&[0u8; 32], bitcoin::network::constants::Network::Testnet);
+               let random_seed_bytes = keys_manager.get_secure_random_bytes();
+               $crate::routing::router::get_route(
+                       &$send_node.node.get_our_node_id(), &$payment_params, &$send_node.network_graph.read_only(),
+                       Some(&$send_node.node.list_usable_channels().iter().collect::<Vec<_>>()),
+                       $recv_value, $cltv, $send_node.logger, &scorer, &random_seed_bytes
+               )
+       }}
+}
+
 #[cfg(test)]
 #[macro_export]
 macro_rules! get_route_and_payment_hash {
@@ -1176,17 +1191,9 @@ macro_rules! get_route_and_payment_hash {
                $crate::get_route_and_payment_hash!($send_node, $recv_node, payment_params, $recv_value, TEST_FINAL_CLTV)
        }};
        ($send_node: expr, $recv_node: expr, $payment_params: expr, $recv_value: expr, $cltv: expr) => {{
-               use $crate::chain::keysinterface::KeysInterface;
                let (payment_preimage, payment_hash, payment_secret) = $crate::get_payment_preimage_hash!($recv_node, Some($recv_value));
-               let scorer = $crate::util::test_utils::TestScorer::with_penalty(0);
-               let keys_manager = $crate::util::test_utils::TestKeysInterface::new(&[0u8; 32], bitcoin::network::constants::Network::Testnet);
-               let random_seed_bytes = keys_manager.get_secure_random_bytes();
-               let route = $crate::routing::router::get_route(
-                       &$send_node.node.get_our_node_id(), &$payment_params, &$send_node.network_graph.read_only(),
-                       Some(&$send_node.node.list_usable_channels().iter().collect::<Vec<_>>()),
-                       $recv_value, $cltv, $send_node.logger, &scorer, &random_seed_bytes
-               ).unwrap();
-               (route, payment_hash, payment_preimage, payment_secret)
+               let route = $crate::get_route!($send_node, $payment_params, $recv_value, $cltv);
+               (route.unwrap(), payment_hash, payment_preimage, payment_secret)
        }}
 }
 
@@ -1655,15 +1662,7 @@ pub const TEST_FINAL_CLTV: u32 = 70;
 pub fn route_payment<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expected_route: &[&Node<'a, 'b, 'c>], recv_value: u64) -> (PaymentPreimage, PaymentHash, PaymentSecret) {
        let payment_params = PaymentParameters::from_node_id(expected_route.last().unwrap().node.get_our_node_id())
                .with_features(InvoiceFeatures::known());
-       let network_graph = origin_node.network_graph.read_only();
-       let scorer = test_utils::TestScorer::with_penalty(0);
-       let seed = [0u8; 32];
-       let keys_manager = test_utils::TestKeysInterface::new(&seed, Network::Testnet);
-       let random_seed_bytes = keys_manager.get_secure_random_bytes();
-       let route = get_route(
-               &origin_node.node.get_our_node_id(), &payment_params, &network_graph,
-               Some(&origin_node.node.list_usable_channels().iter().collect::<Vec<_>>()),
-               recv_value, TEST_FINAL_CLTV, origin_node.logger, &scorer, &random_seed_bytes).unwrap();
+       let route = get_route!(origin_node, payment_params, recv_value, TEST_FINAL_CLTV).unwrap();
        assert_eq!(route.paths.len(), 1);
        assert_eq!(route.paths[0].len(), expected_route.len());
        for (node, hop) in expected_route.iter().zip(route.paths[0].iter()) {
@@ -2010,7 +2009,10 @@ pub fn check_preimage_claim<'a, 'b, 'c>(node: &Node<'a, 'b, 'c>, prev_txn: &Vec<
        for tx in prev_txn {
                if node_txn[0].input[0].previous_output.txid == tx.txid() {
                        check_spends!(node_txn[0], tx);
-                       assert!(node_txn[0].input[0].witness[2].len() > 106); // must spend an htlc output
+                       let mut iter = node_txn[0].input[0].witness.iter();
+                       iter.next().expect("expected 3 witness items");
+                       iter.next().expect("expected 3 witness items");
+                       assert!(iter.next().expect("expected 3 witness items").len() > 106); // must spend an htlc output
                        assert_eq!(tx.input.len(), 1); // must spend a commitment tx
 
                        found_prev = true;
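
The check_preimage_claim hunk above replaces direct witness indexing with iterator access, matching the newer rust-bitcoin Witness type. A minimal stand-alone sketch of the same idiom, assuming the rust-bitcoin 0.28-era Witness API (the dummy element sizes are illustrative only):

        use bitcoin::blockdata::witness::Witness;

        fn main() {
            // Build a three-element witness purely for illustration; in the test the
            // witness comes from a broadcast HTLC-claiming transaction.
            let witness = Witness::from_vec(vec![vec![0u8; 73], vec![0u8; 33], vec![0u8; 140]]);
            let third_item = witness.iter().nth(2).expect("expected 3 witness items");
            assert!(third_item.len() > 106); // the third item holds the (large) witness script
        }
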
index c09ff85f46a9bfe8d8991a9b3aa5ed901fe0f203..8b06f9fe4f41b9eb209589c55b15a69069ee0826 100644 (file)
@@ -26,7 +26,7 @@ use ln::chan_utils::{htlc_success_tx_weight, htlc_timeout_tx_weight, HTLCOutputI
 use routing::router::{PaymentParameters, Route, RouteHop, RouteParameters, find_route, get_route};
 use ln::features::{ChannelFeatures, InitFeatures, InvoiceFeatures, NodeFeatures};
 use ln::msgs;
-use ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, ErrorAction};
+use ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, OptionalField, ErrorAction};
 use util::enforcing_trait_impls::EnforcingSigner;
 use util::{byte_utils, test_utils};
 use util::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose, ClosureReason};
@@ -42,7 +42,7 @@ use bitcoin::blockdata::constants::genesis_block;
 use bitcoin::network::constants::Network;
 
 use bitcoin::secp256k1::Secp256k1;
-use bitcoin::secp256k1::key::{PublicKey,SecretKey};
+use bitcoin::secp256k1::{PublicKey,SecretKey};
 
 use regex;
 
@@ -58,9 +58,12 @@ use ln::chan_utils::CommitmentTransaction;
 #[test]
 fn test_insane_channel_opens() {
        // Stand up a network of 2 nodes
+       use ln::channel::TOTAL_BITCOIN_SUPPLY_SATOSHIS;
+       let mut cfg = UserConfig::default();
+       cfg.peer_channel_config_limits.max_funding_satoshis = TOTAL_BITCOIN_SUPPLY_SATOSHIS + 1;
        let chanmon_cfgs = create_chanmon_cfgs(2);
        let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
-       let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+       let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(cfg)]);
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
        // Instantiate channel parameters where we push the maximum msats given our
@@ -92,15 +95,15 @@ fn test_insane_channel_opens() {
                } else { assert!(false); }
        };
 
-       use ln::channel::MAX_FUNDING_SATOSHIS;
        use ln::channelmanager::MAX_LOCAL_BREAKDOWN_TIMEOUT;
 
        // Test all mutations that would make the channel open message insane
-       insane_open_helper(format!("Funding must be smaller than {}. It was {}", MAX_FUNDING_SATOSHIS, MAX_FUNDING_SATOSHIS).as_str(), |mut msg| { msg.funding_satoshis = MAX_FUNDING_SATOSHIS; msg });
+       insane_open_helper(format!("Per our config, funding must be at most {}. It was {}", TOTAL_BITCOIN_SUPPLY_SATOSHIS + 1, TOTAL_BITCOIN_SUPPLY_SATOSHIS + 2).as_str(), |mut msg| { msg.funding_satoshis = TOTAL_BITCOIN_SUPPLY_SATOSHIS + 2; msg });
+       insane_open_helper(format!("Funding must be smaller than the total bitcoin supply. It was {}", TOTAL_BITCOIN_SUPPLY_SATOSHIS).as_str(), |mut msg| { msg.funding_satoshis = TOTAL_BITCOIN_SUPPLY_SATOSHIS; msg });
 
        insane_open_helper("Bogus channel_reserve_satoshis", |mut msg| { msg.channel_reserve_satoshis = msg.funding_satoshis + 1; msg });
 
-       insane_open_helper(r"push_msat \d+ was larger than funding value \d+", |mut msg| { msg.push_msat = (msg.funding_satoshis - msg.channel_reserve_satoshis) * 1000 + 1; msg });
+       insane_open_helper(r"push_msat \d+ was larger than channel amount minus reserve \(\d+\)", |mut msg| { msg.push_msat = (msg.funding_satoshis - msg.channel_reserve_satoshis) * 1000 + 1; msg });
 
        insane_open_helper("Peer never wants payout outputs?", |mut msg| { msg.dust_limit_satoshis = msg.funding_satoshis + 1 ; msg });
 
@@ -113,6 +116,25 @@ fn test_insane_channel_opens() {
        insane_open_helper("max_accepted_htlcs was 484. It must not be larger than 483", |mut msg| { msg.max_accepted_htlcs = 484; msg });
 }
 
+#[test]
+fn test_funding_exceeds_no_wumbo_limit() {
+       // Test that if a peer does not support wumbo channels, we'll refuse to open a wumbo channel to
+       // them.
+       use ln::channel::MAX_FUNDING_SATOSHIS_NO_WUMBO;
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let mut node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+       node_cfgs[1].features = InitFeatures::known().clear_wumbo();
+       let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+       let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+       match nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), MAX_FUNDING_SATOSHIS_NO_WUMBO + 1, 0, 42, None) {
+               Err(APIError::APIMisuseError { err }) => {
+                       assert_eq!(format!("funding_value must not exceed {}, it was {}", MAX_FUNDING_SATOSHIS_NO_WUMBO, MAX_FUNDING_SATOSHIS_NO_WUMBO + 1), err);
+               },
+               _ => panic!()
+       }
+}
+
 fn do_test_counterparty_no_reserve(send_from_initiator: bool) {
        // A peer providing a channel_reserve_satoshis of 0 (or less than our dust limit) is insecure,
        // but only for them. Because some LSPs do it with some level of trust of the clients (for a
@@ -2156,9 +2178,9 @@ fn channel_monitor_network_test() {
        send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3], &nodes[4])[..], 8000000);
 
        // Simple case with no pending HTLCs:
-       nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), true);
+       nodes[1].node.force_close_channel(&chan_1.2).unwrap();
        check_added_monitors!(nodes[1], 1);
-       check_closed_broadcast!(nodes[1], false);
+       check_closed_broadcast!(nodes[1], true);
        {
                let mut node_txn = test_txn_broadcast(&nodes[1], &chan_1, None, HTLCType::NONE);
                assert_eq!(node_txn.len(), 1);
@@ -2170,15 +2192,15 @@ fn channel_monitor_network_test() {
        assert_eq!(nodes[0].node.list_channels().len(), 0);
        assert_eq!(nodes[1].node.list_channels().len(), 1);
        check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
-       check_closed_event!(nodes[1], 1, ClosureReason::DisconnectedPeer);
+       check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
 
        // One pending HTLC is discarded by the force-close:
        let payment_preimage_1 = route_payment(&nodes[1], &vec!(&nodes[2], &nodes[3])[..], 3000000).0;
 
        // Simple case of one pending HTLC to HTLC-Timeout (note that the HTLC-Timeout is not
        // broadcasted until we reach the timelock time).
-       nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id(), true);
-       check_closed_broadcast!(nodes[1], false);
+       nodes[1].node.force_close_channel(&chan_2.2).unwrap();
+       check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
        {
                let mut node_txn = test_txn_broadcast(&nodes[1], &chan_2, None, HTLCType::NONE);
@@ -2191,7 +2213,7 @@ fn channel_monitor_network_test() {
        check_closed_broadcast!(nodes[2], true);
        assert_eq!(nodes[1].node.list_channels().len(), 0);
        assert_eq!(nodes[2].node.list_channels().len(), 1);
-       check_closed_event!(nodes[1], 1, ClosureReason::DisconnectedPeer);
+       check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
        check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed);
 
        macro_rules! claim_funds {
@@ -2216,9 +2238,9 @@ fn channel_monitor_network_test() {
 
        // nodes[3] gets the preimage, but nodes[2] already disconnected, resulting in a nodes[2]
        // HTLC-Timeout and a nodes[3] claim against it (+ its own announces)
-       nodes[2].node.peer_disconnected(&nodes[3].node.get_our_node_id(), true);
+       nodes[2].node.force_close_channel(&chan_3.2).unwrap();
        check_added_monitors!(nodes[2], 1);
-       check_closed_broadcast!(nodes[2], false);
+       check_closed_broadcast!(nodes[2], true);
        let node2_commitment_txid;
        {
                let node_txn = test_txn_broadcast(&nodes[2], &chan_3, None, HTLCType::NONE);
@@ -2235,7 +2257,7 @@ fn channel_monitor_network_test() {
        check_closed_broadcast!(nodes[3], true);
        assert_eq!(nodes[2].node.list_channels().len(), 0);
        assert_eq!(nodes[3].node.list_channels().len(), 1);
-       check_closed_event!(nodes[2], 1, ClosureReason::DisconnectedPeer);
+       check_closed_event!(nodes[2], 1, ClosureReason::HolderForceClosed);
        check_closed_event!(nodes[3], 1, ClosureReason::CommitmentTxConfirmed);
 
        // Drop the ChannelMonitor for the previous channel to avoid it broadcasting transactions and
@@ -3469,6 +3491,47 @@ fn test_dup_events_on_peer_disconnect() {
        expect_payment_path_successful!(nodes[0]);
 }
 
+#[test]
+fn test_peer_disconnected_before_funding_broadcasted() {
+       // Test that channels are closed with `ClosureReason::DisconnectedPeer` if the peer disconnects
+       // before the funding transaction has been broadcasted.
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+       let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+       let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+       // Open a channel between `nodes[0]` and `nodes[1]`, for which the funding transaction is never
+       // broadcasted, even though it's created by `nodes[0]`.
+       let expected_temporary_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 1_000_000, 500_000_000, 42, None).unwrap();
+       let open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
+       nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), InitFeatures::known(), &open_channel);
+       let accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
+       nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), InitFeatures::known(), &accept_channel);
+
+       let (temporary_channel_id, tx, _funding_output) = create_funding_transaction(&nodes[0], 1_000_000, 42);
+       assert_eq!(temporary_channel_id, expected_temporary_channel_id);
+
+       assert!(nodes[0].node.funding_transaction_generated(&temporary_channel_id, tx.clone()).is_ok());
+
+       let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
+       assert_eq!(funding_created_msg.temporary_channel_id, expected_temporary_channel_id);
+
+       // Even though the funding transaction is created by `nodes[0]`, the `FundingCreated` msg is
+       // never sent to `nodes[1]`, and therefore the tx is never signed by either party nor
+       // broadcasted.
+       {
+               assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 0);
+       }
+
+       // Ensure that the channel is closed with `ClosureReason::DisconnectedPeer` when the peers are
+       // disconnected before the funding transaction was broadcasted.
+       nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+       nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+
+       check_closed_event!(nodes[0], 1, ClosureReason::DisconnectedPeer);
+       check_closed_event!(nodes[1], 1, ClosureReason::DisconnectedPeer);
+}
+
 #[test]
 fn test_simple_peer_disconnect() {
        // Test that we can reconnect when there are no lost messages
@@ -5727,8 +5790,8 @@ fn test_key_derivation_params() {
        check_spends!(local_txn_1[0], chan_1.3);
 
        // We check funding pubkey are unique
-       let (from_0_funding_key_0, from_0_funding_key_1) = (PublicKey::from_slice(&local_txn_0[0].input[0].witness[3][2..35]), PublicKey::from_slice(&local_txn_0[0].input[0].witness[3][36..69]));
-       let (from_1_funding_key_0, from_1_funding_key_1) = (PublicKey::from_slice(&local_txn_1[0].input[0].witness[3][2..35]), PublicKey::from_slice(&local_txn_1[0].input[0].witness[3][36..69]));
+       let (from_0_funding_key_0, from_0_funding_key_1) = (PublicKey::from_slice(&local_txn_0[0].input[0].witness.to_vec()[3][2..35]), PublicKey::from_slice(&local_txn_0[0].input[0].witness.to_vec()[3][36..69]));
+       let (from_1_funding_key_0, from_1_funding_key_1) = (PublicKey::from_slice(&local_txn_1[0].input[0].witness.to_vec()[3][2..35]), PublicKey::from_slice(&local_txn_1[0].input[0].witness.to_vec()[3][36..69]));
        if from_0_funding_key_0 == from_1_funding_key_0
            || from_0_funding_key_0 == from_1_funding_key_1
            || from_0_funding_key_1 == from_1_funding_key_0
@@ -7326,7 +7389,7 @@ fn test_data_loss_protect() {
        logger = test_utils::TestLogger::with_id(format!("node {}", 0));
        let mut chain_monitor = <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(&mut io::Cursor::new(previous_chain_monitor_state.0), keys_manager).unwrap().1;
        chain_source = test_utils::TestChainSource::new(Network::Testnet);
-       tx_broadcaster = test_utils::TestBroadcaster{txn_broadcasted: Mutex::new(Vec::new()), blocks: Arc::new(Mutex::new(Vec::new()))};
+       tx_broadcaster = test_utils::TestBroadcaster { txn_broadcasted: Mutex::new(Vec::new()), blocks: Arc::new(Mutex::new(Vec::new())) };
        fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) };
        persister = test_utils::TestPersister::new();
        monitor = test_utils::TestChainMonitor::new(Some(&chain_source), &tx_broadcaster, &logger, &fee_estimator, &persister, keys_manager);
@@ -7383,22 +7446,48 @@ fn test_data_loss_protect() {
        }
 
        // Check we close channel detecting A is fallen-behind
+       // Check that we sent the warning message when we detected that A has fallen behind,
+       // and give A the chance to recover from the warning.
        nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
-       check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Peer attempted to reestablish channel with a very old local commitment transaction".to_string() });
-       assert_eq!(check_closed_broadcast!(nodes[1], true).unwrap().data, "Peer attempted to reestablish channel with a very old local commitment transaction");
-       check_added_monitors!(nodes[1], 1);
+       let warn_msg = "Peer attempted to reestablish channel with a very old local commitment transaction".to_owned();
+       assert!(check_warn_msg!(nodes[1], nodes[0].node.get_our_node_id(), chan.2).contains(&warn_msg));
 
        // Check A is able to claim to_remote output
-       let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
-       assert_eq!(node_txn.len(), 1);
-       check_spends!(node_txn[0], chan.3);
-       assert_eq!(node_txn[0].output.len(), 2);
-       mine_transaction(&nodes[0], &node_txn[0]);
-       connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
-       check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "We have fallen behind - we have received proof that if we broadcast remote is going to claim our funds - we can\'t do any automated broadcasting".to_string() });
-       let spend_txn = check_spendable_outputs!(nodes[0], node_cfgs[0].keys_manager);
-       assert_eq!(spend_txn.len(), 1);
-       check_spends!(spend_txn[0], node_txn[0]);
+       let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
+       // Node B should not broadcast the transaction to force close the channel!
+       assert!(node_txn.is_empty());
+       // B should now detect that there is something wrong and should force close the channel.
+       let exp_err = "We have fallen behind - we have received proof that if we broadcast remote is going to claim our funds - we can\'t do any automated broadcasting";
+       check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: exp_err.to_string() });
+
+       // After the warning message sent by B, we should not be able to
+       // use the channel or successfully reconnect to it.
+       assert!(nodes[0].node.list_usable_channels().is_empty());
+       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
+       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
+       let retry_reestablish = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
+
+       nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &retry_reestablish[0]);
+       let mut err_msgs_0 = Vec::with_capacity(1);
+       for msg in nodes[0].node.get_and_clear_pending_msg_events() {
+               if let MessageSendEvent::HandleError { ref action, .. } = msg {
+                       match action {
+                               &ErrorAction::SendErrorMessage { ref msg } => {
+                                       assert_eq!(msg.data, "Failed to find corresponding channel");
+                                       err_msgs_0.push(msg.clone());
+                               },
+                               _ => panic!("Unexpected event!"),
+                       }
+               } else {
+                       panic!("Unexpected event!");
+               }
+       }
+       assert_eq!(err_msgs_0.len(), 1);
+       nodes[1].node.handle_error(&nodes[0].node.get_our_node_id(), &err_msgs_0[0]);
+       assert!(nodes[1].node.list_usable_channels().is_empty());
+       check_added_monitors!(nodes[1], 1);
+       check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyForceClosed { peer_msg: "Failed to find corresponding channel".to_owned() });
+       check_closed_broadcast!(nodes[1], false);
 }
 
 #[test]
@@ -7596,7 +7685,7 @@ fn test_bump_penalty_txn_on_revoked_commitment() {
                assert_eq!(node_txn[0].output.len(), 1);
                check_spends!(node_txn[0], revoked_txn[0]);
                let fee_1 = penalty_sum - node_txn[0].output[0].value;
-               feerate_1 = fee_1 * 1000 / node_txn[0].get_weight() as u64;
+               feerate_1 = fee_1 * 1000 / node_txn[0].weight() as u64;
                penalty_1 = node_txn[0].txid();
                node_txn.clear();
        };
@@ -7616,7 +7705,7 @@ fn test_bump_penalty_txn_on_revoked_commitment() {
                        // Verify new bumped tx is different from last claiming transaction, we don't want spurious rebroadcast
                        assert_ne!(penalty_2, penalty_1);
                        let fee_2 = penalty_sum - node_txn[0].output[0].value;
-                       feerate_2 = fee_2 * 1000 / node_txn[0].get_weight() as u64;
+                       feerate_2 = fee_2 * 1000 / node_txn[0].weight() as u64;
                        // Verify 25% bump heuristic
                        assert!(feerate_2 * 100 >= feerate_1 * 125);
                        node_txn.clear();
@@ -7639,7 +7728,7 @@ fn test_bump_penalty_txn_on_revoked_commitment() {
                        // Verify new bumped tx is different from last claiming transaction, we don't want spurious rebroadcast
                        assert_ne!(penalty_3, penalty_2);
                        let fee_3 = penalty_sum - node_txn[0].output[0].value;
-                       feerate_3 = fee_3 * 1000 / node_txn[0].get_weight() as u64;
+                       feerate_3 = fee_3 * 1000 / node_txn[0].weight() as u64;
                        // Verify 25% bump heuristic
                        assert!(feerate_3 * 100 >= feerate_2 * 125);
                        node_txn.clear();
@@ -7758,7 +7847,7 @@ fn test_bump_penalty_txn_on_revoked_htlcs() {
                first = node_txn[4].txid();
                // Store both feerates for later comparison
                let fee_1 = revoked_htlc_txn[0].output[0].value + revoked_htlc_txn[2].output[0].value - node_txn[4].output[0].value;
-               feerate_1 = fee_1 * 1000 / node_txn[4].get_weight() as u64;
+               feerate_1 = fee_1 * 1000 / node_txn[4].weight() as u64;
                penalty_txn = vec![node_txn[2].clone()];
                node_txn.clear();
        }
@@ -7798,7 +7887,7 @@ fn test_bump_penalty_txn_on_revoked_htlcs() {
                // Verify bumped tx is different and 25% bump heuristic
                assert_ne!(first, node_txn[0].txid());
                let fee_2 = revoked_htlc_txn[0].output[0].value + revoked_htlc_txn[2].output[0].value - node_txn[0].output[0].value;
-               let feerate_2 = fee_2 * 1000 / node_txn[0].get_weight() as u64;
+               let feerate_2 = fee_2 * 1000 / node_txn[0].weight() as u64;
                assert!(feerate_2 * 100 > feerate_1 * 125);
                let txn = vec![node_txn[0].clone()];
                node_txn.clear();
@@ -7882,12 +7971,12 @@ fn test_bump_penalty_txn_on_remote_commitment() {
                timeout = node_txn[6].txid();
                let index = node_txn[6].input[0].previous_output.vout;
                let fee = remote_txn[0].output[index as usize].value - node_txn[6].output[0].value;
-               feerate_timeout = fee * 1000 / node_txn[6].get_weight() as u64;
+               feerate_timeout = fee * 1000 / node_txn[6].weight() as u64;
 
                preimage = node_txn[0].txid();
                let index = node_txn[0].input[0].previous_output.vout;
                let fee = remote_txn[0].output[index as usize].value - node_txn[0].output[0].value;
-               feerate_preimage = fee * 1000 / node_txn[0].get_weight() as u64;
+               feerate_preimage = fee * 1000 / node_txn[0].weight() as u64;
 
                node_txn.clear();
        };
@@ -7906,13 +7995,13 @@ fn test_bump_penalty_txn_on_remote_commitment() {
 
                let index = preimage_bump.input[0].previous_output.vout;
                let fee = remote_txn[0].output[index as usize].value - preimage_bump.output[0].value;
-               let new_feerate = fee * 1000 / preimage_bump.get_weight() as u64;
+               let new_feerate = fee * 1000 / preimage_bump.weight() as u64;
                assert!(new_feerate * 100 > feerate_timeout * 125);
                assert_ne!(timeout, preimage_bump.txid());
 
                let index = node_txn[0].input[0].previous_output.vout;
                let fee = remote_txn[0].output[index as usize].value - node_txn[0].output[0].value;
-               let new_feerate = fee * 1000 / node_txn[0].get_weight() as u64;
+               let new_feerate = fee * 1000 / node_txn[0].weight() as u64;
                assert!(new_feerate * 100 > feerate_preimage * 125);
                assert_ne!(preimage, node_txn[0].txid());
 
@@ -8124,6 +8213,58 @@ fn test_override_0msat_htlc_minimum() {
        assert_eq!(res.htlc_minimum_msat, 1);
 }
 
+#[test]
+fn test_channel_update_has_correct_htlc_maximum_msat() {
+       // Tests that the `ChannelUpdate` message has the correct values for `htlc_maximum_msat` set.
+       // BOLT 7 specifies that, if present, `htlc_maximum_msat`:
+       // 1. MUST be set to less than or equal to the channel capacity. In LDK, this is capped at
+       // 90% of the `channel_value`.
+       // 2. MUST be set to less than or equal to the `max_htlc_value_in_flight_msat` received from the peer.
+
+       let mut config_30_percent = UserConfig::default();
+       config_30_percent.channel_options.announced_channel = true;
+       config_30_percent.own_channel_config.max_inbound_htlc_value_in_flight_percent_of_channel = 30;
+       let mut config_50_percent = UserConfig::default();
+       config_50_percent.channel_options.announced_channel = true;
+       config_50_percent.own_channel_config.max_inbound_htlc_value_in_flight_percent_of_channel = 50;
+       let mut config_95_percent = UserConfig::default();
+       config_95_percent.channel_options.announced_channel = true;
+       config_95_percent.own_channel_config.max_inbound_htlc_value_in_flight_percent_of_channel = 95;
+       let mut config_100_percent = UserConfig::default();
+       config_100_percent.channel_options.announced_channel = true;
+       config_100_percent.own_channel_config.max_inbound_htlc_value_in_flight_percent_of_channel = 100;
+
+       let chanmon_cfgs = create_chanmon_cfgs(4);
+       let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
+       let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[Some(config_30_percent), Some(config_50_percent), Some(config_95_percent), Some(config_100_percent)]);
+       let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
+
+       let channel_value_satoshis = 100000;
+       let channel_value_msat = channel_value_satoshis * 1000;
+       let channel_value_30_percent_msat = (channel_value_msat as f64 * 0.3) as u64;
+       let channel_value_50_percent_msat = (channel_value_msat as f64 * 0.5) as u64;
+       let channel_value_90_percent_msat = (channel_value_msat as f64 * 0.9) as u64;
+
+       let (node_0_chan_update, node_1_chan_update, _, _)  = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, channel_value_satoshis, 10001, InitFeatures::known(), InitFeatures::known());
+       let (node_2_chan_update, node_3_chan_update, _, _)  = create_announced_chan_between_nodes_with_value(&nodes, 2, 3, channel_value_satoshis, 10001, InitFeatures::known(), InitFeatures::known());
+
+       // Assert that `node[0]`'s `ChannelUpdate` is capped at 50 percent of the `channel_value`, as
+       // that's the value of `node[1]`'s `holder_max_htlc_value_in_flight_msat`.
+       assert_eq!(node_0_chan_update.contents.htlc_maximum_msat, OptionalField::Present(channel_value_50_percent_msat));
+       // Assert that `node[1]`'s `ChannelUpdate` is capped at 30 percent of the `channel_value`, as
+       // that's the value of `node[0]`'s `holder_max_htlc_value_in_flight_msat`.
+       assert_eq!(node_1_chan_update.contents.htlc_maximum_msat, OptionalField::Present(channel_value_30_percent_msat));
+
+       // Assert that `node[2]`'s `ChannelUpdate` is capped at 90 percent of the `channel_value`, as
+       // the value of `node[3]`'s `holder_max_htlc_value_in_flight_msat` (100%), exceeds 90% of the
+       // `channel_value`.
+       assert_eq!(node_2_chan_update.contents.htlc_maximum_msat, OptionalField::Present(channel_value_90_percent_msat));
+       // Assert that `node[3]`'s `ChannelUpdate` is capped at 90 percent of the `channel_value`, as
+       // the value of `node[2]`'s `holder_max_htlc_value_in_flight_msat` (95%), exceeds 90% of the
+       // `channel_value`.
+       assert_eq!(node_3_chan_update.contents.htlc_maximum_msat, OptionalField::Present(channel_value_90_percent_msat));
+}
+
 #[test]
 fn test_manually_accept_inbound_channel_request() {
        let mut manually_accept_conf = UserConfig::default();
@@ -9438,12 +9579,7 @@ fn test_forwardable_regen() {
        claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage_2);
 }
 
-#[test]
-fn test_dup_htlc_second_fail_panic() {
-       // Previously, if we received two HTLCs back-to-back, where the second overran the expected
-       // value for the payment, we'd fail back both HTLCs after generating a `PaymentReceived` event.
-       // Then, if the user failed the second payment, they'd hit a "tried to fail an already failed
-       // HTLC" debug panic. This tests for this behavior, checking that only one HTLC is auto-failed.
+fn do_test_dup_htlc_second_rejected(test_for_second_fail_panic: bool) {
        let chanmon_cfgs = create_chanmon_cfgs(2);
        let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
@@ -9453,14 +9589,9 @@ fn test_dup_htlc_second_fail_panic() {
 
        let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id())
                .with_features(InvoiceFeatures::known());
-       let scorer = test_utils::TestScorer::with_penalty(0);
-       let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
-       let route = get_route(
-               &nodes[0].node.get_our_node_id(), &payment_params, &nodes[0].network_graph.read_only(),
-               Some(&nodes[0].node.list_usable_channels().iter().collect::<Vec<_>>()),
-               10_000, TEST_FINAL_CLTV, nodes[0].logger, &scorer, &random_seed_bytes).unwrap();
+       let route = get_route!(nodes[0], payment_params, 10_000, TEST_FINAL_CLTV).unwrap();
 
-       let (_, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(&nodes[1]);
+       let (our_payment_preimage, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(&nodes[1]);
 
        {
                nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)).unwrap();
@@ -9488,26 +9619,153 @@ fn test_dup_htlc_second_fail_panic() {
                // the first HTLC delivered above.
        }
 
-       // Now we go fail back the first HTLC from the user end.
        expect_pending_htlcs_forwardable_ignore!(nodes[1]);
        nodes[1].node.process_pending_htlc_forwards();
-       nodes[1].node.fail_htlc_backwards(&our_payment_hash);
 
-       expect_pending_htlcs_forwardable_ignore!(nodes[1]);
-       nodes[1].node.process_pending_htlc_forwards();
+       if test_for_second_fail_panic {
+               // Now we go fail back the first HTLC from the user end.
+               nodes[1].node.fail_htlc_backwards(&our_payment_hash);
 
-       check_added_monitors!(nodes[1], 1);
-       let fail_updates_1 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
-       assert_eq!(fail_updates_1.update_fail_htlcs.len(), 2);
+               expect_pending_htlcs_forwardable_ignore!(nodes[1]);
+               nodes[1].node.process_pending_htlc_forwards();
+
+               check_added_monitors!(nodes[1], 1);
+               let fail_updates_1 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+               assert_eq!(fail_updates_1.update_fail_htlcs.len(), 2);
+
+               nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &fail_updates_1.update_fail_htlcs[0]);
+               nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &fail_updates_1.update_fail_htlcs[1]);
+               commitment_signed_dance!(nodes[0], nodes[1], fail_updates_1.commitment_signed, false);
+
+               let failure_events = nodes[0].node.get_and_clear_pending_events();
+               assert_eq!(failure_events.len(), 2);
+               if let Event::PaymentPathFailed { .. } = failure_events[0] {} else { panic!(); }
+               if let Event::PaymentPathFailed { .. } = failure_events[1] {} else { panic!(); }
+       } else {
+               // Let the second HTLC fail and claim the first
+               expect_pending_htlcs_forwardable_ignore!(nodes[1]);
+               nodes[1].node.process_pending_htlc_forwards();
+
+               check_added_monitors!(nodes[1], 1);
+               let fail_updates_1 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+               nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &fail_updates_1.update_fail_htlcs[0]);
+               commitment_signed_dance!(nodes[0], nodes[1], fail_updates_1.commitment_signed, false);
+
+               expect_payment_failed_conditions!(nodes[0], our_payment_hash, true, PaymentFailedConditions::new().mpp_parts_remain());
+
+               claim_payment(&nodes[0], &[&nodes[1]], our_payment_preimage);
+       }
+}
+
+#[test]
+fn test_dup_htlc_second_fail_panic() {
+       // Previously, if we received two HTLCs back-to-back, where the second overran the expected
+       // value for the payment, we'd fail back both HTLCs after generating a `PaymentReceived` event.
+       // Then, if the user failed the second payment, they'd hit a "tried to fail an already failed
+       // HTLC" debug panic. This tests for this behavior, checking that only one HTLC is auto-failed.
+       do_test_dup_htlc_second_rejected(true);
+}
+
+#[test]
+fn test_dup_htlc_second_rejected() {
+       // Test that if we receive a second HTLC for an MPP payment that overruns the payment amount we
+       // simply reject the second HTLC but are still able to claim the first HTLC.
+       do_test_dup_htlc_second_rejected(false);
+}
+
+#[test]
+fn test_inconsistent_mpp_params() {
+       // Test that if we receive two HTLCs with different payment parameters we fail back the first
+       // such HTLC and allow the second to stay.
+       let chanmon_cfgs = create_chanmon_cfgs(4);
+       let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
+       let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
+       let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
+
+       create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 0, InitFeatures::known(), InitFeatures::known());
+       create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 100_000, 0, InitFeatures::known(), InitFeatures::known());
+       create_announced_chan_between_nodes_with_value(&nodes, 1, 3, 100_000, 0, InitFeatures::known(), InitFeatures::known());
+       create_announced_chan_between_nodes_with_value(&nodes, 2, 3, 100_000, 0, InitFeatures::known(), InitFeatures::known());
 
-       nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &fail_updates_1.update_fail_htlcs[0]);
-       nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &fail_updates_1.update_fail_htlcs[1]);
-       commitment_signed_dance!(nodes[0], nodes[1], fail_updates_1.commitment_signed, false);
+       let payment_params = PaymentParameters::from_node_id(nodes[3].node.get_our_node_id())
+               .with_features(InvoiceFeatures::known());
+       let mut route = get_route!(nodes[0], payment_params, 15_000_000, TEST_FINAL_CLTV).unwrap();
+       assert_eq!(route.paths.len(), 2);
+       route.paths.sort_by(|path_a, _| {
+               // Sort the paths so that the path through nodes[1] comes first
+               if path_a[0].pubkey == nodes[1].node.get_our_node_id() {
+                       core::cmp::Ordering::Less } else { core::cmp::Ordering::Greater }
+       });
+       let payment_params_opt = Some(payment_params);
+
+       let (our_payment_preimage, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(&nodes[3]);
+
+       let cur_height = nodes[0].best_block_info().1;
+       let payment_id = PaymentId([42; 32]);
+       {
+               nodes[0].node.send_payment_along_path(&route.paths[0], &payment_params_opt, &our_payment_hash, &Some(our_payment_secret), 15_000_000, cur_height, payment_id, &None).unwrap();
+               check_added_monitors!(nodes[0], 1);
+
+               let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+               assert_eq!(events.len(), 1);
+               pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 15_000_000, our_payment_hash, Some(our_payment_secret), events.pop().unwrap(), false, None);
+       }
+       assert!(nodes[3].node.get_and_clear_pending_events().is_empty());
+
+       {
+               nodes[0].node.send_payment_along_path(&route.paths[1], &payment_params_opt, &our_payment_hash, &Some(our_payment_secret), 14_000_000, cur_height, payment_id, &None).unwrap();
+               check_added_monitors!(nodes[0], 1);
+
+               let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+               assert_eq!(events.len(), 1);
+               let payment_event = SendEvent::from_event(events.pop().unwrap());
+
+               nodes[2].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
+               commitment_signed_dance!(nodes[2], nodes[0], payment_event.commitment_msg, false);
+
+               expect_pending_htlcs_forwardable!(nodes[2]);
+               check_added_monitors!(nodes[2], 1);
+
+               let mut events = nodes[2].node.get_and_clear_pending_msg_events();
+               assert_eq!(events.len(), 1);
+               let payment_event = SendEvent::from_event(events.pop().unwrap());
+
+               nodes[3].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &payment_event.msgs[0]);
+               check_added_monitors!(nodes[3], 0);
+               commitment_signed_dance!(nodes[3], nodes[2], payment_event.commitment_msg, true, true);
+
+               // At this point, nodes[3] should notice the two HTLCs don't contain the same total payment
+               // amount. It will assume the second is a privacy attack (no longer particularly relevant
+               // post-payment_secrets) and fail back the new HTLC.
+       }
+       expect_pending_htlcs_forwardable_ignore!(nodes[3]);
+       nodes[3].node.process_pending_htlc_forwards();
+       expect_pending_htlcs_forwardable_ignore!(nodes[3]);
+       nodes[3].node.process_pending_htlc_forwards();
+
+       check_added_monitors!(nodes[3], 1);
+
+       let fail_updates_1 = get_htlc_update_msgs!(nodes[3], nodes[2].node.get_our_node_id());
+       nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &fail_updates_1.update_fail_htlcs[0]);
+       commitment_signed_dance!(nodes[2], nodes[3], fail_updates_1.commitment_signed, false);
+
+       expect_pending_htlcs_forwardable!(nodes[2]);
+       check_added_monitors!(nodes[2], 1);
+
+       let fail_updates_2 = get_htlc_update_msgs!(nodes[2], nodes[0].node.get_our_node_id());
+       nodes[0].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &fail_updates_2.update_fail_htlcs[0]);
+       commitment_signed_dance!(nodes[0], nodes[2], fail_updates_2.commitment_signed, false);
+
+       expect_payment_failed_conditions!(nodes[0], our_payment_hash, true, PaymentFailedConditions::new().mpp_parts_remain());
+
+       nodes[0].node.send_payment_along_path(&route.paths[1], &payment_params_opt, &our_payment_hash, &Some(our_payment_secret), 15_000_000, cur_height, payment_id, &None).unwrap();
+       check_added_monitors!(nodes[0], 1);
+
+       let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+       assert_eq!(events.len(), 1);
+       pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], 15_000_000, our_payment_hash, Some(our_payment_secret), events.pop().unwrap(), true, None);
 
-       let failure_events = nodes[0].node.get_and_clear_pending_events();
-       assert_eq!(failure_events.len(), 2);
-       if let Event::PaymentPathFailed { .. } = failure_events[0] {} else { panic!(); }
-       if let Event::PaymentPathFailed { .. } = failure_events[1] {} else { panic!(); }
+       claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, our_payment_preimage);
 }
 
 #[test]
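
The htlc_maximum_msat assertions in test_channel_update_has_correct_htlc_maximum_msat above reduce to taking the smaller of two caps. A minimal sketch of that arithmetic, assuming the 90% figure quoted in the test comments (the helper name is hypothetical):

        /// Smaller of "90% of the channel value" and the peer's max-in-flight limit.
        fn advertised_htlc_maximum_msat(channel_value_msat: u64, counterparty_max_in_flight_msat: u64) -> u64 {
            core::cmp::min(channel_value_msat * 9 / 10, counterparty_max_in_flight_msat)
        }

        fn main() {
            // For a 100_000 sat (100_000_000 msat) channel: a 30% in-flight limit caps the
            // advertised maximum at 30_000_000 msat, while a 100% limit is clamped to the
            // 90% ceiling of 90_000_000 msat, matching the assertions in the test.
            assert_eq!(advertised_htlc_maximum_msat(100_000_000, 30_000_000), 30_000_000);
            assert_eq!(advertised_htlc_maximum_msat(100_000_000, 100_000_000), 90_000_000);
        }
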
index 8ed77e5a615afe44e352d2fc5c12a0186c9bcc50..f4f114d957193c8bf696f8cc12cacb49c005d0ed 100644 (file)
@@ -200,7 +200,7 @@ fn construct_payment_secret(iv_bytes: &[u8; IV_LEN], metadata_bytes: &[u8; METAD
 /// [`KeysInterface::get_inbound_payment_key_material`]: crate::chain::keysinterface::KeysInterface::get_inbound_payment_key_material
 /// [`create_inbound_payment`]: crate::ln::channelmanager::ChannelManager::create_inbound_payment
 /// [`create_inbound_payment_for_hash`]: crate::ln::channelmanager::ChannelManager::create_inbound_payment_for_hash
-pub(super) fn verify<L: Deref>(payment_hash: PaymentHash, payment_data: msgs::FinalOnionHopData, highest_seen_timestamp: u64, keys: &ExpandedKey, logger: &L) -> Result<Option<PaymentPreimage>, ()>
+pub(super) fn verify<L: Deref>(payment_hash: PaymentHash, payment_data: &msgs::FinalOnionHopData, highest_seen_timestamp: u64, keys: &ExpandedKey, logger: &L) -> Result<Option<PaymentPreimage>, ()>
        where L::Target: Logger
 {
        let (iv_bytes, metadata_bytes) = decrypt_metadata(payment_data.payment_secret, keys);
index ef07d4c918f8597f28a6b3df90aeaded15b06bb9..281a2a8e977123158f5daeba45ff8666c96ec403 100644 (file)
@@ -24,8 +24,8 @@
 //! raw socket events into your non-internet-facing system and then send routing events back to
 //! track the network on the less-secure system.
 
-use bitcoin::secp256k1::key::PublicKey;
-use bitcoin::secp256k1::Signature;
+use bitcoin::secp256k1::PublicKey;
+use bitcoin::secp256k1::ecdsa::Signature;
 use bitcoin::secp256k1;
 use bitcoin::blockdata::script::Script;
 use bitcoin::hash_types::{Txid, BlockHash};
@@ -1835,7 +1835,7 @@ mod tests {
        use bitcoin::blockdata::opcodes;
        use bitcoin::hash_types::{Txid, BlockHash};
 
-       use bitcoin::secp256k1::key::{PublicKey,SecretKey};
+       use bitcoin::secp256k1::{PublicKey,SecretKey};
        use bitcoin::secp256k1::{Secp256k1, Message};
 
        use io::Cursor;
@@ -1892,7 +1892,7 @@ mod tests {
                ($privkey: expr, $ctx: expr, $string: expr) => {
                        {
                                let sighash = Message::from_slice(&$string.into_bytes()[..]).unwrap();
-                               $ctx.sign(&sighash, &$privkey)
+                               $ctx.sign_ecdsa(&sighash, &$privkey)
                        }
                }
        }
@@ -2155,7 +2155,7 @@ mod tests {
                        htlc_basepoint: pubkey_5,
                        first_per_commitment_point: pubkey_6,
                        channel_flags: if random_bit { 1 << 5 } else { 0 },
-                       shutdown_scriptpubkey: if shutdown { OptionalField::Present(Address::p2pkh(&::bitcoin::PublicKey{compressed: true, key: pubkey_1}, Network::Testnet).script_pubkey()) } else { OptionalField::Absent },
+                       shutdown_scriptpubkey: if shutdown { OptionalField::Present(Address::p2pkh(&::bitcoin::PublicKey{compressed: true, inner: pubkey_1}, Network::Testnet).script_pubkey()) } else { OptionalField::Absent },
                        channel_type: if incl_chan_type { Some(ChannelTypeFeatures::empty()) } else { None },
                };
                let encoded_value = open_channel.encode();
@@ -2211,7 +2211,7 @@ mod tests {
                        delayed_payment_basepoint: pubkey_4,
                        htlc_basepoint: pubkey_5,
                        first_per_commitment_point: pubkey_6,
-                       shutdown_scriptpubkey: if shutdown { OptionalField::Present(Address::p2pkh(&::bitcoin::PublicKey{compressed: true, key: pubkey_1}, Network::Testnet).script_pubkey()) } else { OptionalField::Absent },
+                       shutdown_scriptpubkey: if shutdown { OptionalField::Present(Address::p2pkh(&::bitcoin::PublicKey{compressed: true, inner: pubkey_1}, Network::Testnet).script_pubkey()) } else { OptionalField::Absent },
                        channel_type: None,
                };
                let encoded_value = accept_channel.encode();
@@ -2279,9 +2279,9 @@ mod tests {
                let shutdown = msgs::Shutdown {
                        channel_id: [2; 32],
                        scriptpubkey:
-                                    if script_type == 1 { Address::p2pkh(&::bitcoin::PublicKey{compressed: true, key: pubkey_1}, Network::Testnet).script_pubkey() }
-                               else if script_type == 2 { Address::p2sh(&script, Network::Testnet).script_pubkey() }
-                               else if script_type == 3 { Address::p2wpkh(&::bitcoin::PublicKey{compressed: true, key: pubkey_1}, Network::Testnet).unwrap().script_pubkey() }
+                                    if script_type == 1 { Address::p2pkh(&::bitcoin::PublicKey{compressed: true, inner: pubkey_1}, Network::Testnet).script_pubkey() }
+                               else if script_type == 2 { Address::p2sh(&script, Network::Testnet).unwrap().script_pubkey() }
+                               else if script_type == 3 { Address::p2wpkh(&::bitcoin::PublicKey{compressed: true, inner: pubkey_1}, Network::Testnet).unwrap().script_pubkey() }
                                else                     { Address::p2wsh(&script, Network::Testnet).script_pubkey() },
                };
                let encoded_value = shutdown.encode();
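
Several hunks in this file track the secp256k1 module reshuffle (key::PublicKey becoming PublicKey, Signature moving under ecdsa) and the sign/verify renames. A minimal stand-alone sketch of the renamed calls, assuming the secp256k1 0.22-era API re-exported by rust-bitcoin after this upgrade:

        use bitcoin::secp256k1::{Message, PublicKey, Secp256k1, SecretKey};
        use bitcoin::secp256k1::ecdsa::Signature;

        fn main() {
            let secp = Secp256k1::new();
            let sk = SecretKey::from_slice(&[42u8; 32]).expect("32 non-zero bytes");
            let pk = PublicKey::from_secret_key(&secp, &sk);
            let msg = Message::from_slice(&[7u8; 32]).expect("exactly 32 bytes");
            // sign() and verify() became sign_ecdsa() and verify_ecdsa().
            let sig: Signature = secp.sign_ecdsa(&msg, &sk);
            assert!(secp.verify_ecdsa(&msg, &sig, &pk).is_ok());
        }
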
index 834791cba958de960d0c5f90868173f5097f0705..9a07603fafe7e89e90b71a0d943bba59c483a223 100644 (file)
@@ -21,6 +21,7 @@ use routing::router::{get_route, PaymentParameters, Route, RouteHint, RouteHintH
 use ln::features::{InitFeatures, InvoiceFeatures, NodeFeatures};
 use ln::msgs;
 use ln::msgs::{ChannelMessageHandler, ChannelUpdate, OptionalField};
+use ln::wire::Encode;
 use util::events::{Event, MessageSendEvent, MessageSendEventsProvider};
 use util::ser::{Writeable, Writer};
 use util::{byte_utils, test_utils};
@@ -33,7 +34,7 @@ use bitcoin::hashes::sha256::Hash as Sha256;
 
 use bitcoin::secp256k1;
 use bitcoin::secp256k1::Secp256k1;
-use bitcoin::secp256k1::key::{PublicKey, SecretKey};
+use bitcoin::secp256k1::{PublicKey, SecretKey};
 
 use io;
 use prelude::*;
@@ -214,7 +215,7 @@ fn run_onion_failure_test_with_fail_intercept<F1,F2,F3>(_name: &str, test_case:
 impl msgs::ChannelUpdate {
        fn dummy(short_channel_id: u64) -> msgs::ChannelUpdate {
                use bitcoin::secp256k1::ffi::Signature as FFISignature;
-               use bitcoin::secp256k1::Signature;
+               use bitcoin::secp256k1::ecdsa::Signature;
                msgs::ChannelUpdate {
                        signature: Signature::from(unsafe { FFISignature::new() }),
                        contents: msgs::UnsignedChannelUpdate {
@@ -371,7 +372,7 @@ fn test_onion_failure() {
                // and tamper returning error message
                let session_priv = SecretKey::from_slice(&[3; 32]).unwrap();
                let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route.paths[0], &session_priv).unwrap();
-               msg.reason = onion_utils::build_first_hop_failure_packet(&onion_keys[0].shared_secret[..], NODE|2, &[0;0]);
+               msg.reason = onion_utils::build_first_hop_failure_packet(onion_keys[0].shared_secret.as_ref(), NODE|2, &[0;0]);
        }, ||{}, true, Some(NODE|2), Some(NetworkUpdate::NodeFailure{node_id: route.paths[0][0].pubkey, is_permanent: false}), Some(route.paths[0][0].short_channel_id));
 
        // final node failure
@@ -379,7 +380,7 @@ fn test_onion_failure() {
                // and tamper returning error message
                let session_priv = SecretKey::from_slice(&[3; 32]).unwrap();
                let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route.paths[0], &session_priv).unwrap();
-               msg.reason = onion_utils::build_first_hop_failure_packet(&onion_keys[1].shared_secret[..], NODE|2, &[0;0]);
+               msg.reason = onion_utils::build_first_hop_failure_packet(onion_keys[1].shared_secret.as_ref(), NODE|2, &[0;0]);
        }, ||{
                nodes[2].node.fail_htlc_backwards(&payment_hash);
        }, true, Some(NODE|2), Some(NetworkUpdate::NodeFailure{node_id: route.paths[0][1].pubkey, is_permanent: false}), Some(route.paths[0][1].short_channel_id));
@@ -391,14 +392,14 @@ fn test_onion_failure() {
        }, |msg| {
                let session_priv = SecretKey::from_slice(&[3; 32]).unwrap();
                let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route.paths[0], &session_priv).unwrap();
-               msg.reason = onion_utils::build_first_hop_failure_packet(&onion_keys[0].shared_secret[..], PERM|NODE|2, &[0;0]);
+               msg.reason = onion_utils::build_first_hop_failure_packet(onion_keys[0].shared_secret.as_ref(), PERM|NODE|2, &[0;0]);
        }, ||{}, true, Some(PERM|NODE|2), Some(NetworkUpdate::NodeFailure{node_id: route.paths[0][0].pubkey, is_permanent: true}), Some(route.paths[0][0].short_channel_id));
 
        // final node failure
        run_onion_failure_test_with_fail_intercept("permanent_node_failure", 200, &nodes, &route, &payment_hash, &payment_secret, |_msg| {}, |msg| {
                let session_priv = SecretKey::from_slice(&[3; 32]).unwrap();
                let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route.paths[0], &session_priv).unwrap();
-               msg.reason = onion_utils::build_first_hop_failure_packet(&onion_keys[1].shared_secret[..], PERM|NODE|2, &[0;0]);
+               msg.reason = onion_utils::build_first_hop_failure_packet(onion_keys[1].shared_secret.as_ref(), PERM|NODE|2, &[0;0]);
        }, ||{
                nodes[2].node.fail_htlc_backwards(&payment_hash);
        }, false, Some(PERM|NODE|2), Some(NetworkUpdate::NodeFailure{node_id: route.paths[0][1].pubkey, is_permanent: true}), Some(route.paths[0][1].short_channel_id));
@@ -410,7 +411,7 @@ fn test_onion_failure() {
        }, |msg| {
                let session_priv = SecretKey::from_slice(&[3; 32]).unwrap();
                let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route.paths[0], &session_priv).unwrap();
-               msg.reason = onion_utils::build_first_hop_failure_packet(&onion_keys[0].shared_secret[..], PERM|NODE|3, &[0;0]);
+               msg.reason = onion_utils::build_first_hop_failure_packet(onion_keys[0].shared_secret.as_ref(), PERM|NODE|3, &[0;0]);
        }, ||{
                nodes[2].node.fail_htlc_backwards(&payment_hash);
        }, true, Some(PERM|NODE|3), Some(NetworkUpdate::NodeFailure{node_id: route.paths[0][0].pubkey, is_permanent: true}), Some(route.paths[0][0].short_channel_id));
@@ -419,7 +420,7 @@ fn test_onion_failure() {
        run_onion_failure_test_with_fail_intercept("required_node_feature_missing", 200, &nodes, &route, &payment_hash, &payment_secret, |_msg| {}, |msg| {
                let session_priv = SecretKey::from_slice(&[3; 32]).unwrap();
                let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route.paths[0], &session_priv).unwrap();
-               msg.reason = onion_utils::build_first_hop_failure_packet(&onion_keys[1].shared_secret[..], PERM|NODE|3, &[0;0]);
+               msg.reason = onion_utils::build_first_hop_failure_packet(onion_keys[1].shared_secret.as_ref(), PERM|NODE|3, &[0;0]);
        }, ||{
                nodes[2].node.fail_htlc_backwards(&payment_hash);
        }, false, Some(PERM|NODE|3), Some(NetworkUpdate::NodeFailure{node_id: route.paths[0][1].pubkey, is_permanent: true}), Some(route.paths[0][1].short_channel_id));
@@ -438,13 +439,29 @@ fn test_onion_failure() {
                Some(BADONION|PERM|6), None, Some(short_channel_id));
 
        let short_channel_id = channels[1].0.contents.short_channel_id;
+       let chan_update = ChannelUpdate::dummy(short_channel_id);
+
+       let mut err_data = Vec::new();
+       err_data.extend_from_slice(&(chan_update.serialized_length() as u16 + 2).to_be_bytes());
+       err_data.extend_from_slice(&ChannelUpdate::TYPE.to_be_bytes());
+       err_data.extend_from_slice(&chan_update.encode());
+       run_onion_failure_test_with_fail_intercept("temporary_channel_failure", 100, &nodes, &route, &payment_hash, &payment_secret, |msg| {
+               msg.amount_msat -= 1;
+       }, |msg| {
+               let session_priv = SecretKey::from_slice(&[3; 32]).unwrap();
+               let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route.paths[0], &session_priv).unwrap();
+               msg.reason = onion_utils::build_first_hop_failure_packet(onion_keys[0].shared_secret.as_ref(), UPDATE|7, &err_data);
+       }, ||{}, true, Some(UPDATE|7), Some(NetworkUpdate::ChannelUpdateMessage{msg: chan_update.clone()}), Some(short_channel_id));
+
+       // Check we can still handle onion failures that include channel updates without a type prefix
+       let err_data_without_type = chan_update.encode_with_len();
        run_onion_failure_test_with_fail_intercept("temporary_channel_failure", 100, &nodes, &route, &payment_hash, &payment_secret, |msg| {
                msg.amount_msat -= 1;
        }, |msg| {
                let session_priv = SecretKey::from_slice(&[3; 32]).unwrap();
                let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route.paths[0], &session_priv).unwrap();
-               msg.reason = onion_utils::build_first_hop_failure_packet(&onion_keys[0].shared_secret[..], UPDATE|7, &ChannelUpdate::dummy(short_channel_id).encode_with_len()[..]);
-       }, ||{}, true, Some(UPDATE|7), Some(NetworkUpdate::ChannelUpdateMessage{msg: ChannelUpdate::dummy(short_channel_id)}), Some(short_channel_id));
+               msg.reason = onion_utils::build_first_hop_failure_packet(onion_keys[0].shared_secret.as_ref(), UPDATE|7, &err_data_without_type);
+       }, ||{}, true, Some(UPDATE|7), Some(NetworkUpdate::ChannelUpdateMessage{msg: chan_update}), Some(short_channel_id));
 
        let short_channel_id = channels[1].0.contents.short_channel_id;
        run_onion_failure_test_with_fail_intercept("permanent_channel_failure", 100, &nodes, &route, &payment_hash, &payment_secret, |msg| {
@@ -452,7 +469,7 @@ fn test_onion_failure() {
        }, |msg| {
                let session_priv = SecretKey::from_slice(&[3; 32]).unwrap();
                let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route.paths[0], &session_priv).unwrap();
-               msg.reason = onion_utils::build_first_hop_failure_packet(&onion_keys[0].shared_secret[..], PERM|8, &[0;0]);
+               msg.reason = onion_utils::build_first_hop_failure_packet(onion_keys[0].shared_secret.as_ref(), PERM|8, &[0;0]);
                // short_channel_id from the processing node
        }, ||{}, true, Some(PERM|8), Some(NetworkUpdate::ChannelClosed{short_channel_id, is_permanent: true}), Some(short_channel_id));
 
@@ -462,7 +479,7 @@ fn test_onion_failure() {
        }, |msg| {
                let session_priv = SecretKey::from_slice(&[3; 32]).unwrap();
                let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route.paths[0], &session_priv).unwrap();
-               msg.reason = onion_utils::build_first_hop_failure_packet(&onion_keys[0].shared_secret[..], PERM|9, &[0;0]);
+               msg.reason = onion_utils::build_first_hop_failure_packet(onion_keys[0].shared_secret.as_ref(), PERM|9, &[0;0]);
                // short_channel_id from the processing node
        }, ||{}, true, Some(PERM|9), Some(NetworkUpdate::ChannelClosed{short_channel_id, is_permanent: true}), Some(short_channel_id));
 
@@ -571,7 +588,7 @@ fn test_onion_failure() {
                // Tamper returning error message
                let session_priv = SecretKey::from_slice(&[3; 32]).unwrap();
                let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route.paths[0], &session_priv).unwrap();
-               msg.reason = onion_utils::build_first_hop_failure_packet(&onion_keys[1].shared_secret[..], 23, &[0;0]);
+               msg.reason = onion_utils::build_first_hop_failure_packet(onion_keys[1].shared_secret.as_ref(), 23, &[0;0]);
        }, ||{
                nodes[2].node.fail_htlc_backwards(&payment_hash);
        }, true, Some(23), None, None);
@@ -1097,11 +1114,15 @@ fn test_phantom_dust_exposure_failure() {
        commitment_signed_dance!(nodes[0], nodes[1], update_1.commitment_signed, false);
 
        // Ensure the payment fails with the expected error.
-       let mut error_data = channel.1.encode_with_len();
+       let mut err_data = Vec::new();
+       err_data.extend_from_slice(&(channel.1.serialized_length() as u16 + 2).to_be_bytes());
+       err_data.extend_from_slice(&ChannelUpdate::TYPE.to_be_bytes());
+       err_data.extend_from_slice(&channel.1.encode());
+
        let mut fail_conditions = PaymentFailedConditions::new()
                .blamed_scid(channel.0.contents.short_channel_id)
                .blamed_chan_closed(false)
-               .expected_htlc_error_data(0x1000 | 7, &error_data);
+               .expected_htlc_error_data(0x1000 | 7, &err_data);
                expect_payment_failed_conditions!(nodes[0], payment_hash, false, fail_conditions);
 }
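
Both tests above now build the failure payload with an explicit two-byte message type ahead of the serialized channel_update, and an outer length field that covers the prefix plus the update. A minimal stand-alone sketch of that layout follows; the helper name and the raw-bytes parameter are illustrative and not part of the diff.

    // Hypothetical helper mirroring the err_data construction in the tests above.
    fn build_update_failure_data(chan_update_bytes: &[u8]) -> Vec<u8> {
        // ChannelUpdate::TYPE is 258, i.e. the big-endian bytes [0x01, 0x02].
        const CHANNEL_UPDATE_TYPE: u16 = 258;
        let mut err_data = Vec::new();
        // Two-byte big-endian length covering the type prefix plus the update itself.
        err_data.extend_from_slice(&((chan_update_bytes.len() as u16) + 2).to_be_bytes());
        err_data.extend_from_slice(&CHANNEL_UPDATE_TYPE.to_be_bytes());
        err_data.extend_from_slice(chan_update_bytes);
        err_data
    }

Since 258 is the BOLT 7 channel_update message type, the length field is simply the serialized update length plus two.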
 
index 0dd6087f82018d9faf56d53672325b76284caa00..9e4ae058f8d517bdfb93abb021d02a7eb588e8a5 100644 (file)
@@ -10,6 +10,7 @@
 use ln::{PaymentHash, PaymentPreimage, PaymentSecret};
 use ln::channelmanager::HTLCSource;
 use ln::msgs;
+use ln::wire::Encode;
 use routing::network_graph::NetworkUpdate;
 use routing::router::RouteHop;
 use util::chacha20::{ChaCha20, ChaChaReader};
@@ -22,7 +23,7 @@ use bitcoin::hashes::cmp::fixed_time_eq;
 use bitcoin::hashes::hmac::{Hmac, HmacEngine};
 use bitcoin::hashes::sha256::Hash as Sha256;
 
-use bitcoin::secp256k1::key::{SecretKey,PublicKey};
+use bitcoin::secp256k1::{SecretKey,PublicKey};
 use bitcoin::secp256k1::Secp256k1;
 use bitcoin::secp256k1::ecdh::SharedSecret;
 use bitcoin::secp256k1;
@@ -47,12 +48,12 @@ pub(super) fn gen_rho_mu_from_shared_secret(shared_secret: &[u8]) -> ([u8; 32],
        assert_eq!(shared_secret.len(), 32);
        ({
                let mut hmac = HmacEngine::<Sha256>::new(&[0x72, 0x68, 0x6f]); // rho
-               hmac.input(&shared_secret[..]);
+               hmac.input(&shared_secret);
                Hmac::from_engine(hmac).into_inner()
        },
        {
                let mut hmac = HmacEngine::<Sha256>::new(&[0x6d, 0x75]); // mu
-               hmac.input(&shared_secret[..]);
+               hmac.input(&shared_secret);
                Hmac::from_engine(hmac).into_inner()
        })
 }
@@ -61,7 +62,7 @@ pub(super) fn gen_rho_mu_from_shared_secret(shared_secret: &[u8]) -> ([u8; 32],
 pub(super) fn gen_um_from_shared_secret(shared_secret: &[u8]) -> [u8; 32] {
        assert_eq!(shared_secret.len(), 32);
        let mut hmac = HmacEngine::<Sha256>::new(&[0x75, 0x6d]); // um
-       hmac.input(&shared_secret[..]);
+       hmac.input(&shared_secret);
        Hmac::from_engine(hmac).into_inner()
 }
 
@@ -69,7 +70,7 @@ pub(super) fn gen_um_from_shared_secret(shared_secret: &[u8]) -> [u8; 32] {
 pub(super) fn gen_ammag_from_shared_secret(shared_secret: &[u8]) -> [u8; 32] {
        assert_eq!(shared_secret.len(), 32);
        let mut hmac = HmacEngine::<Sha256>::new(&[0x61, 0x6d, 0x6d, 0x61, 0x67]); // ammag
-       hmac.input(&shared_secret[..]);
+       hmac.input(&shared_secret);
        Hmac::from_engine(hmac).into_inner()
 }
 
@@ -84,7 +85,7 @@ pub(super) fn construct_onion_keys_callback<T: secp256k1::Signing, FType: FnMut(
 
                let mut sha = Sha256::engine();
                sha.input(&blinded_pub.serialize()[..]);
-               sha.input(&shared_secret[..]);
+               sha.input(shared_secret.as_ref());
                let blinding_factor = Sha256::from_engine(sha).into_inner();
 
                let ephemeral_pubkey = blinded_pub;
@@ -103,7 +104,7 @@ pub(super) fn construct_onion_keys<T: secp256k1::Signing>(secp_ctx: &Secp256k1<T
        let mut res = Vec::with_capacity(path.len());
 
        construct_onion_keys_callback(secp_ctx, path, session_priv, |shared_secret, _blinding_factor, ephemeral_pubkey, _, _| {
-               let (rho, mu) = gen_rho_mu_from_shared_secret(&shared_secret[..]);
+               let (rho, mu) = gen_rho_mu_from_shared_secret(shared_secret.as_ref());
 
                res.push(OnionKeys {
                        #[cfg(test)]
@@ -347,7 +348,7 @@ pub(super) fn process_onion_failure<T: secp256k1::Signing, L: Deref>(secp_ctx: &
                        let amt_to_forward = htlc_msat - route_hop.fee_msat;
                        htlc_msat = amt_to_forward;
 
-                       let ammag = gen_ammag_from_shared_secret(&shared_secret[..]);
+                       let ammag = gen_ammag_from_shared_secret(shared_secret.as_ref());
 
                        let mut decryption_tmp = Vec::with_capacity(packet_decrypted.len());
                        decryption_tmp.resize(packet_decrypted.len(), 0);
@@ -361,7 +362,7 @@ pub(super) fn process_onion_failure<T: secp256k1::Signing, L: Deref>(secp_ctx: &
                        let failing_route_hop = if is_from_final_node { route_hop } else { &path[route_hop_idx + 1] };
 
                        if let Ok(err_packet) = msgs::DecodedOnionErrorPacket::read(&mut Cursor::new(&packet_decrypted)) {
-                               let um = gen_um_from_shared_secret(&shared_secret[..]);
+                               let um = gen_um_from_shared_secret(shared_secret.as_ref());
                                let mut hmac = HmacEngine::<Sha256>::new(&um);
                                hmac.input(&err_packet.encode()[32..]);
 
@@ -404,7 +405,21 @@ pub(super) fn process_onion_failure<T: secp256k1::Signing, L: Deref>(secp_ctx: &
                                                else if error_code & UPDATE == UPDATE {
                                                        if let Some(update_len_slice) = err_packet.failuremsg.get(debug_field_size+2..debug_field_size+4) {
                                                                let update_len = u16::from_be_bytes(update_len_slice.try_into().expect("len is 2")) as usize;
-                                                               if let Some(update_slice) = err_packet.failuremsg.get(debug_field_size + 4..debug_field_size + 4 + update_len) {
+                                                               if let Some(mut update_slice) = err_packet.failuremsg.get(debug_field_size + 4..debug_field_size + 4 + update_len) {
+                                                                       // Historically, the BOLTs were unclear if the message type
+                                                                       // bytes should be included here or not. The BOLTs have now
+                                                                       // been updated to indicate that they *are* included, but many
+                                                                       // nodes still send messages without the type bytes, so we
+                                                                       // support both here.
+                                                                       // TODO: Switch to hard require the type prefix, as the current
+                                                                       // permissiveness introduces the (although small) possibility
+                                                                       // that we fail to decode legitimate channel updates that
+                                                                       // happen to start with ChannelUpdate::TYPE, i.e., [0x01, 0x02].
+                                                                       if update_slice.len() > 2 && update_slice[0..2] == msgs::ChannelUpdate::TYPE.to_be_bytes() {
+                                                                               update_slice = &update_slice[2..];
+                                                                       } else {
+                                                                               log_trace!(logger, "Failure provided features a channel update without type prefix. Deprecated, but allowing for now.");
+                                                                       }
                                                                        if let Ok(chan_update) = msgs::ChannelUpdate::read(&mut Cursor::new(&update_slice)) {
                                                                                // if channel_update should NOT have caused the failure:
                                                                                // MAY treat the channel_update as invalid.
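
As the comment block above explains, the receiving side now accepts the embedded channel_update with or without the leading type bytes. A stand-alone sketch of just the prefix handling, assuming the caller has already extracted the length-delimited update slice; the function name is illustrative rather than the crate's API.

    // Strip the optional ChannelUpdate type prefix before deserializing.
    fn strip_optional_type_prefix(update_slice: &[u8]) -> &[u8] {
        // Big-endian encoding of ChannelUpdate::TYPE (258).
        const CHANNEL_UPDATE_TYPE_BYTES: [u8; 2] = [0x01, 0x02];
        if update_slice.len() > 2 && update_slice[0..2] == CHANNEL_UPDATE_TYPE_BYTES {
            &update_slice[2..]
        } else {
            // No type prefix: deprecated encoding, but still accepted for now.
            update_slice
        }
    }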
@@ -434,6 +449,8 @@ pub(super) fn process_onion_failure<T: secp256k1::Signing, L: Deref>(secp_ctx: &
                                                                                        // short channel id.
                                                                                        if failing_route_hop.short_channel_id == chan_update.contents.short_channel_id {
                                                                                                short_channel_id = Some(failing_route_hop.short_channel_id);
+                                                                                       } else {
+                                                                                               log_info!(logger, "Node provided a channel_update for which it was not authoritative, ignoring.");
                                                                                        }
                                                                                        network_update = Some(NetworkUpdate::ChannelUpdateMessage {
                                                                                                msg: chan_update,
@@ -478,10 +495,10 @@ pub(super) fn process_onion_failure<T: secp256k1::Signing, L: Deref>(secp_ctx: &
 
                                                let (description, title) = errors::get_onion_error_description(error_code);
                                                if debug_field_size > 0 && err_packet.failuremsg.len() >= 4 + debug_field_size {
-                                                       log_warn!(logger, "Onion Error[from {}: {}({:#x}) {}({})] {}", route_hop.pubkey, title, error_code, debug_field, log_bytes!(&err_packet.failuremsg[4..4+debug_field_size]), description);
+                                                       log_info!(logger, "Onion Error[from {}: {}({:#x}) {}({})] {}", route_hop.pubkey, title, error_code, debug_field, log_bytes!(&err_packet.failuremsg[4..4+debug_field_size]), description);
                                                }
                                                else {
-                                                       log_warn!(logger, "Onion Error[from {}: {}({:#x})] {}", route_hop.pubkey, title, error_code, description);
+                                                       log_info!(logger, "Onion Error[from {}: {}({:#x})] {}", route_hop.pubkey, title, error_code, description);
                                                }
                                        } else {
                                                // Useless packet that we can't use but it passed HMAC, so it
@@ -627,7 +644,7 @@ mod tests {
        use hex;
 
        use bitcoin::secp256k1::Secp256k1;
-       use bitcoin::secp256k1::key::{PublicKey,SecretKey};
+       use bitcoin::secp256k1::{PublicKey,SecretKey};
 
        use super::OnionKeys;
 
@@ -678,31 +695,31 @@ mod tests {
                // Legacy packet creation test vectors from BOLT 4
                let onion_keys = build_test_onion_keys();
 
-               assert_eq!(onion_keys[0].shared_secret[..], hex::decode("53eb63ea8a3fec3b3cd433b85cd62a4b145e1dda09391b348c4e1cd36a03ea66").unwrap()[..]);
+               assert_eq!(onion_keys[0].shared_secret.secret_bytes(), hex::decode("53eb63ea8a3fec3b3cd433b85cd62a4b145e1dda09391b348c4e1cd36a03ea66").unwrap()[..]);
                assert_eq!(onion_keys[0].blinding_factor[..], hex::decode("2ec2e5da605776054187180343287683aa6a51b4b1c04d6dd49c45d8cffb3c36").unwrap()[..]);
                assert_eq!(onion_keys[0].ephemeral_pubkey.serialize()[..], hex::decode("02eec7245d6b7d2ccb30380bfbe2a3648cd7a942653f5aa340edcea1f283686619").unwrap()[..]);
                assert_eq!(onion_keys[0].rho, hex::decode("ce496ec94def95aadd4bec15cdb41a740c9f2b62347c4917325fcc6fb0453986").unwrap()[..]);
                assert_eq!(onion_keys[0].mu, hex::decode("b57061dc6d0a2b9f261ac410c8b26d64ac5506cbba30267a649c28c179400eba").unwrap()[..]);
 
-               assert_eq!(onion_keys[1].shared_secret[..], hex::decode("a6519e98832a0b179f62123b3567c106db99ee37bef036e783263602f3488fae").unwrap()[..]);
+               assert_eq!(onion_keys[1].shared_secret.secret_bytes(), hex::decode("a6519e98832a0b179f62123b3567c106db99ee37bef036e783263602f3488fae").unwrap()[..]);
                assert_eq!(onion_keys[1].blinding_factor[..], hex::decode("bf66c28bc22e598cfd574a1931a2bafbca09163df2261e6d0056b2610dab938f").unwrap()[..]);
                assert_eq!(onion_keys[1].ephemeral_pubkey.serialize()[..], hex::decode("028f9438bfbf7feac2e108d677e3a82da596be706cc1cf342b75c7b7e22bf4e6e2").unwrap()[..]);
                assert_eq!(onion_keys[1].rho, hex::decode("450ffcabc6449094918ebe13d4f03e433d20a3d28a768203337bc40b6e4b2c59").unwrap()[..]);
                assert_eq!(onion_keys[1].mu, hex::decode("05ed2b4a3fb023c2ff5dd6ed4b9b6ea7383f5cfe9d59c11d121ec2c81ca2eea9").unwrap()[..]);
 
-               assert_eq!(onion_keys[2].shared_secret[..], hex::decode("3a6b412548762f0dbccce5c7ae7bb8147d1caf9b5471c34120b30bc9c04891cc").unwrap()[..]);
+               assert_eq!(onion_keys[2].shared_secret.secret_bytes(), hex::decode("3a6b412548762f0dbccce5c7ae7bb8147d1caf9b5471c34120b30bc9c04891cc").unwrap()[..]);
                assert_eq!(onion_keys[2].blinding_factor[..], hex::decode("a1f2dadd184eb1627049673f18c6325814384facdee5bfd935d9cb031a1698a5").unwrap()[..]);
                assert_eq!(onion_keys[2].ephemeral_pubkey.serialize()[..], hex::decode("03bfd8225241ea71cd0843db7709f4c222f62ff2d4516fd38b39914ab6b83e0da0").unwrap()[..]);
                assert_eq!(onion_keys[2].rho, hex::decode("11bf5c4f960239cb37833936aa3d02cea82c0f39fd35f566109c41f9eac8deea").unwrap()[..]);
                assert_eq!(onion_keys[2].mu, hex::decode("caafe2820fa00eb2eeb78695ae452eba38f5a53ed6d53518c5c6edf76f3f5b78").unwrap()[..]);
 
-               assert_eq!(onion_keys[3].shared_secret[..], hex::decode("21e13c2d7cfe7e18836df50872466117a295783ab8aab0e7ecc8c725503ad02d").unwrap()[..]);
+               assert_eq!(onion_keys[3].shared_secret.secret_bytes(), hex::decode("21e13c2d7cfe7e18836df50872466117a295783ab8aab0e7ecc8c725503ad02d").unwrap()[..]);
                assert_eq!(onion_keys[3].blinding_factor[..], hex::decode("7cfe0b699f35525029ae0fa437c69d0f20f7ed4e3916133f9cacbb13c82ff262").unwrap()[..]);
                assert_eq!(onion_keys[3].ephemeral_pubkey.serialize()[..], hex::decode("031dde6926381289671300239ea8e57ffaf9bebd05b9a5b95beaf07af05cd43595").unwrap()[..]);
                assert_eq!(onion_keys[3].rho, hex::decode("cbe784ab745c13ff5cffc2fbe3e84424aa0fd669b8ead4ee562901a4a4e89e9e").unwrap()[..]);
                assert_eq!(onion_keys[3].mu, hex::decode("5052aa1b3d9f0655a0932e50d42f0c9ba0705142c25d225515c45f47c0036ee9").unwrap()[..]);
 
-               assert_eq!(onion_keys[4].shared_secret[..], hex::decode("b5756b9b542727dbafc6765a49488b023a725d631af688fc031217e90770c328").unwrap()[..]);
+               assert_eq!(onion_keys[4].shared_secret.secret_bytes(), hex::decode("b5756b9b542727dbafc6765a49488b023a725d631af688fc031217e90770c328").unwrap()[..]);
                assert_eq!(onion_keys[4].blinding_factor[..], hex::decode("c96e00dddaf57e7edcd4fb5954be5b65b09f17cb6d20651b4e90315be5779205").unwrap()[..]);
                assert_eq!(onion_keys[4].ephemeral_pubkey.serialize()[..], hex::decode("03a214ebd875aab6ddfd77f22c5e7311d7f77f17a169e599f157bbcdae8bf071f4").unwrap()[..]);
                assert_eq!(onion_keys[4].rho, hex::decode("034e18b8cc718e8af6339106e706c52d8df89e2b1f7e9142d996acf88df8799b").unwrap()[..]);
@@ -758,22 +775,22 @@ mod tests {
                // Returning Errors test vectors from BOLT 4
 
                let onion_keys = build_test_onion_keys();
-               let onion_error = super::build_failure_packet(&onion_keys[4].shared_secret[..], 0x2002, &[0; 0]);
+               let onion_error = super::build_failure_packet(onion_keys[4].shared_secret.as_ref(), 0x2002, &[0; 0]);
                assert_eq!(onion_error.encode(), hex::decode("4c2fc8bc08510334b6833ad9c3e79cd1b52ae59dfe5c2a4b23ead50f09f7ee0b0002200200fe0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000").unwrap());
 
-               let onion_packet_1 = super::encrypt_failure_packet(&onion_keys[4].shared_secret[..], &onion_error.encode()[..]);
+               let onion_packet_1 = super::encrypt_failure_packet(onion_keys[4].shared_secret.as_ref(), &onion_error.encode()[..]);
                assert_eq!(onion_packet_1.data, hex::decode("a5e6bd0c74cb347f10cce367f949098f2457d14c046fd8a22cb96efb30b0fdcda8cb9168b50f2fd45edd73c1b0c8b33002df376801ff58aaa94000bf8a86f92620f343baef38a580102395ae3abf9128d1047a0736ff9b83d456740ebbb4aeb3aa9737f18fb4afb4aa074fb26c4d702f42968888550a3bded8c05247e045b866baef0499f079fdaeef6538f31d44deafffdfd3afa2fb4ca9082b8f1c465371a9894dd8c243fb4847e004f5256b3e90e2edde4c9fb3082ddfe4d1e734cacd96ef0706bf63c9984e22dc98851bcccd1c3494351feb458c9c6af41c0044bea3c47552b1d992ae542b17a2d0bba1a096c78d169034ecb55b6e3a7263c26017f033031228833c1daefc0dedb8cf7c3e37c9c37ebfe42f3225c326e8bcfd338804c145b16e34e4").unwrap());
 
-               let onion_packet_2 = super::encrypt_failure_packet(&onion_keys[3].shared_secret[..], &onion_packet_1.data[..]);
+               let onion_packet_2 = super::encrypt_failure_packet(onion_keys[3].shared_secret.as_ref(), &onion_packet_1.data[..]);
                assert_eq!(onion_packet_2.data, hex::decode("c49a1ce81680f78f5f2000cda36268de34a3f0a0662f55b4e837c83a8773c22aa081bab1616a0011585323930fa5b9fae0c85770a2279ff59ec427ad1bbff9001c0cd1497004bd2a0f68b50704cf6d6a4bf3c8b6a0833399a24b3456961ba00736785112594f65b6b2d44d9f5ea4e49b5e1ec2af978cbe31c67114440ac51a62081df0ed46d4a3df295da0b0fe25c0115019f03f15ec86fabb4c852f83449e812f141a9395b3f70b766ebbd4ec2fae2b6955bd8f32684c15abfe8fd3a6261e52650e8807a92158d9f1463261a925e4bfba44bd20b166d532f0017185c3a6ac7957adefe45559e3072c8dc35abeba835a8cb01a71a15c736911126f27d46a36168ca5ef7dccd4e2886212602b181463e0dd30185c96348f9743a02aca8ec27c0b90dca270").unwrap());
 
-               let onion_packet_3 = super::encrypt_failure_packet(&onion_keys[2].shared_secret[..], &onion_packet_2.data[..]);
+               let onion_packet_3 = super::encrypt_failure_packet(onion_keys[2].shared_secret.as_ref(), &onion_packet_2.data[..]);
                assert_eq!(onion_packet_3.data, hex::decode("a5d3e8634cfe78b2307d87c6d90be6fe7855b4f2cc9b1dfb19e92e4b79103f61ff9ac25f412ddfb7466e74f81b3e545563cdd8f5524dae873de61d7bdfccd496af2584930d2b566b4f8d3881f8c043df92224f38cf094cfc09d92655989531524593ec6d6caec1863bdfaa79229b5020acc034cd6deeea1021c50586947b9b8e6faa83b81fbfa6133c0af5d6b07c017f7158fa94f0d206baf12dda6b68f785b773b360fd0497e16cc402d779c8d48d0fa6315536ef0660f3f4e1865f5b38ea49c7da4fd959de4e83ff3ab686f059a45c65ba2af4a6a79166aa0f496bf04d06987b6d2ea205bdb0d347718b9aeff5b61dfff344993a275b79717cd815b6ad4c0beb568c4ac9c36ff1c315ec1119a1993c4b61e6eaa0375e0aaf738ac691abd3263bf937e3").unwrap());
 
-               let onion_packet_4 = super::encrypt_failure_packet(&onion_keys[1].shared_secret[..], &onion_packet_3.data[..]);
+               let onion_packet_4 = super::encrypt_failure_packet(onion_keys[1].shared_secret.as_ref(), &onion_packet_3.data[..]);
                assert_eq!(onion_packet_4.data, hex::decode("aac3200c4968f56b21f53e5e374e3a2383ad2b1b6501bbcc45abc31e59b26881b7dfadbb56ec8dae8857add94e6702fb4c3a4de22e2e669e1ed926b04447fc73034bb730f4932acd62727b75348a648a1128744657ca6a4e713b9b646c3ca66cac02cdab44dd3439890ef3aaf61708714f7375349b8da541b2548d452d84de7084bb95b3ac2345201d624d31f4d52078aa0fa05a88b4e20202bd2b86ac5b52919ea305a8949de95e935eed0319cf3cf19ebea61d76ba92532497fcdc9411d06bcd4275094d0a4a3c5d3a945e43305a5a9256e333e1f64dbca5fcd4e03a39b9012d197506e06f29339dfee3331995b21615337ae060233d39befea925cc262873e0530408e6990f1cbd233a150ef7b004ff6166c70c68d9f8c853c1abca640b8660db2921").unwrap());
 
-               let onion_packet_5 = super::encrypt_failure_packet(&onion_keys[0].shared_secret[..], &onion_packet_4.data[..]);
+               let onion_packet_5 = super::encrypt_failure_packet(onion_keys[0].shared_secret.as_ref(), &onion_packet_4.data[..]);
                assert_eq!(onion_packet_5.data, hex::decode("9c5add3963fc7f6ed7f148623c84134b5647e1306419dbe2174e523fa9e2fbed3a06a19f899145610741c83ad40b7712aefaddec8c6baf7325d92ea4ca4d1df8bce517f7e54554608bf2bd8071a4f52a7a2f7ffbb1413edad81eeea5785aa9d990f2865dc23b4bc3c301a94eec4eabebca66be5cf638f693ec256aec514620cc28ee4a94bd9565bc4d4962b9d3641d4278fb319ed2b84de5b665f307a2db0f7fbb757366067d88c50f7e829138fde4f78d39b5b5802f1b92a8a820865af5cc79f9f30bc3f461c66af95d13e5e1f0381c184572a91dee1c849048a647a1158cf884064deddbf1b0b88dfe2f791428d0ba0f6fb2f04e14081f69165ae66d9297c118f0907705c9c4954a199bae0bb96fad763d690e7daa6cfda59ba7f2c8d11448b604d12d").unwrap());
        }
 
index fbd32526ea658470f810c487bcb88da608904e8b..da6dc67aba5c4f52ec9e9dcd819aebf131d33084 100644 (file)
@@ -16,7 +16,7 @@ use bitcoin::hashes::{Hash, HashEngine};
 use bitcoin::hashes::sha256::Hash as Sha256;
 
 use bitcoin::secp256k1::Secp256k1;
-use bitcoin::secp256k1::key::{PublicKey,SecretKey};
+use bitcoin::secp256k1::{PublicKey,SecretKey};
 use bitcoin::secp256k1::ecdh::SharedSecret;
 use bitcoin::secp256k1;
 
@@ -163,7 +163,7 @@ impl PeerChannelEncryptor {
 
        #[inline]
        fn hkdf(state: &mut BidirectionalNoiseState, ss: SharedSecret) -> [u8; 32] {
-               let (t1, t2) = hkdf_extract_expand_twice(&state.ck, &ss[..]);
+               let (t1, t2) = hkdf_extract_expand_twice(&state.ck, ss.as_ref());
                state.ck = t1;
                t2
        }
@@ -473,7 +473,7 @@ impl PeerChannelEncryptor {
 mod tests {
        use super::LN_MAX_MSG_LEN;
 
-       use bitcoin::secp256k1::key::{PublicKey,SecretKey};
+       use bitcoin::secp256k1::{PublicKey,SecretKey};
 
        use hex;
 
index d12f8c06eed0409630864359e54c20d3b714930d..4b777b0b99446e0d4781e6029592b60ee0e24ff7 100644 (file)
@@ -15,7 +15,7 @@
 //! call into the provided message handlers (probably a ChannelManager and NetGraphMsgHandler) with messages
 //! they should handle, and encoding/sending response messages.
 
-use bitcoin::secp256k1::key::{SecretKey,PublicKey};
+use bitcoin::secp256k1::{SecretKey,PublicKey};
 
 use ln::features::InitFeatures;
 use ln::msgs;
@@ -33,7 +33,8 @@ use routing::network_graph::{NetworkGraph, NetGraphMsgHandler};
 use prelude::*;
 use io;
 use alloc::collections::LinkedList;
-use sync::{Arc, Mutex};
+use sync::{Arc, Mutex, MutexGuard, FairRwLock};
+use core::sync::atomic::{AtomicBool, Ordering};
 use core::{cmp, hash, fmt, mem};
 use core::ops::Deref;
 use core::convert::Infallible;
@@ -257,8 +258,13 @@ pub trait SocketDescriptor : cmp::Eq + hash::Hash + Clone {
 /// descriptor.
 #[derive(Clone)]
 pub struct PeerHandleError {
-       /// Used to indicate that we probably can't make any future connections to this peer, implying
-       /// we should go ahead and force-close any channels we have with it.
+       /// Used to indicate that we probably can't make any future connections to this peer (e.g.
+       /// because we required features that our peer was missing, or vice versa).
+       ///
+       /// While LDK's [`ChannelManager`] will not do it automatically, you likely wish to force-close
+       /// any channels with this peer or check for new versions of LDK.
+       ///
+       /// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
        pub no_connection_possible: bool,
 }
 impl fmt::Debug for PeerHandleError {
@@ -339,6 +345,7 @@ struct Peer {
        msgs_sent_since_pong: usize,
        awaiting_pong_timer_tick_intervals: i8,
        received_message_since_timer_tick: bool,
+       sent_gossip_timestamp_filter: bool,
 }
 
 impl Peer {
@@ -348,7 +355,11 @@ impl Peer {
        /// announcements/updates for the given channel_id then we will send it when we get to that
        /// point and we shouldn't send it yet to avoid sending duplicate updates. If we've already
        /// sent the old versions, we should send the update, and so return true here.
-       fn should_forward_channel_announcement(&self, channel_id: u64)->bool{
+       fn should_forward_channel_announcement(&self, channel_id: u64) -> bool {
+               if self.their_features.as_ref().unwrap().supports_gossip_queries() &&
+                       !self.sent_gossip_timestamp_filter {
+                               return false;
+                       }
                match self.sync_status {
                        InitSyncTracker::NoSyncRequested => true,
                        InitSyncTracker::ChannelsSyncing(i) => i < channel_id,
@@ -358,6 +369,10 @@ impl Peer {
 
        /// Similar to the above, but for node announcements indexed by node_id.
        fn should_forward_node_announcement(&self, node_id: PublicKey) -> bool {
+               if self.their_features.as_ref().unwrap().supports_gossip_queries() &&
+                       !self.sent_gossip_timestamp_filter {
+                               return false;
+                       }
                match self.sync_status {
                        InitSyncTracker::NoSyncRequested => true,
                        InitSyncTracker::ChannelsSyncing(_) => false,
@@ -366,12 +381,6 @@ impl Peer {
        }
 }
 
-struct PeerHolder<Descriptor: SocketDescriptor> {
-       peers: HashMap<Descriptor, Peer>,
-       /// Only add to this set when noise completes:
-       node_id_to_descriptor: HashMap<PublicKey, Descriptor>,
-}
-
 /// SimpleArcPeerManager is useful when you need a PeerManager with a static lifetime, e.g.
 /// when you're using lightning-net-tokio (since tokio::spawn requires parameters with static
 /// lifetimes). Other times you can afford a reference, which is more efficient, in which case
@@ -416,7 +425,29 @@ pub struct PeerManager<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, L: De
                L::Target: Logger,
                CMH::Target: CustomMessageHandler {
        message_handler: MessageHandler<CM, RM>,
-       peers: Mutex<PeerHolder<Descriptor>>,
+       /// Connection state for each connected peer - we have an outer read-write lock which is taken
+       /// as read while we're doing processing for a peer and taken write when a peer is being added
+       /// or removed.
+       ///
+       /// The inner Peer lock is held for sending and receiving bytes, but note that we do *not* hold
+       /// it while we're processing a message. This is fine as [`PeerManager::read_event`] requires
+       /// that there be no parallel calls for a given peer, so mutual exclusion of messages handed to
+       /// the `MessageHandler`s for a given peer is already guaranteed.
+       peers: FairRwLock<HashMap<Descriptor, Mutex<Peer>>>,
+       /// Only add to this set when noise completes.
+       /// Locked *after* peers. When an item is removed, it must be removed with the `peers` write
+       /// lock held. Entries may be added with only the `peers` read lock held (though the
+       /// `Descriptor` value must already exist in `peers`).
+       node_id_to_descriptor: Mutex<HashMap<PublicKey, Descriptor>>,
+       /// We can only have one thread processing events at once, but we don't usually need the full
+       /// `peers` write lock to do so, so instead we block on this empty mutex when entering
+       /// `process_events`.
+       event_processing_lock: Mutex<()>,
+       /// Because event processing is global and always does all available work before returning,
+       /// there is no reason for us to have many event processors waiting on the lock at once.
+       /// Instead, we limit the total blocked event processors to always exactly one by setting this
+       /// when an event process call is waiting.
+       blocked_event_processors: AtomicBool,
        our_node_secret: SecretKey,
        ephemeral_key_midstate: Sha256Engine,
        custom_message_handler: CMH,
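
The comments above describe a two-level scheme: the single PeerHolder mutex is replaced by an outer read-write lock over a map of per-peer mutexes, so per-peer work only needs the outer lock in read mode. A minimal sketch of that read-path lock order, using std::sync::RwLock as a stand-in for the crate's FairRwLock and a stub Peer type; none of these names are the crate's real internals.

    use std::collections::HashMap;
    use std::sync::{Mutex, RwLock};

    // Stand-in peer state; the real Peer struct carries buffers and handshake state.
    struct Peer { awaiting_write_event: bool }

    // Take the outer map lock (read) first, then each per-peer Mutex, and drop
    // the per-peer lock before handing any decoded message to the handlers.
    fn for_each_peer(peers: &RwLock<HashMap<u64, Mutex<Peer>>>, mut f: impl FnMut(&mut Peer)) {
        let peers_guard = peers.read().unwrap();
        for (_descriptor, peer_mutex) in peers_guard.iter() {
            let mut peer = peer_mutex.lock().unwrap();
            f(&mut *peer);
        }
    }

With this layout, only adding or removing a peer needs the outer lock in write mode and blocks work on other peers.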
@@ -544,10 +575,10 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, L: Deref, CMH: Deref> P
 
                PeerManager {
                        message_handler,
-                       peers: Mutex::new(PeerHolder {
-                               peers: HashMap::new(),
-                               node_id_to_descriptor: HashMap::new()
-                       }),
+                       peers: FairRwLock::new(HashMap::new()),
+                       node_id_to_descriptor: Mutex::new(HashMap::new()),
+                       event_processing_lock: Mutex::new(()),
+                       blocked_event_processors: AtomicBool::new(false),
                        our_node_secret,
                        ephemeral_key_midstate,
                        peer_counter: AtomicCounter::new(),
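
The event_processing_lock and blocked_event_processors fields added above are only declared in this hunk. Below is a hedged sketch of how such a pair can implement the "at most one blocked waiter" behaviour the field comments describe, using only std::sync primitives; the process_events body here is an assumption for illustration, not the diff's actual implementation.

    use std::sync::Mutex;
    use std::sync::atomic::{AtomicBool, Ordering};

    struct EventProcessor {
        event_processing_lock: Mutex<()>,
        blocked_event_processors: AtomicBool,
    }

    impl EventProcessor {
        fn process_events(&self, mut do_work: impl FnMut()) {
            let mut _single_processor_lock = self.event_processing_lock.try_lock();
            while _single_processor_lock.is_err() {
                // Claim the single waiter slot; if another call already holds it,
                // return and let that call pick up the work instead.
                if self.blocked_event_processors
                    .compare_exchange(false, true, Ordering::AcqRel, Ordering::Acquire)
                    .is_err()
                {
                    return;
                }
                _single_processor_lock = Ok(self.event_processing_lock.lock().unwrap());
                self.blocked_event_processors.store(false, Ordering::Release);
            }
            do_work();
        }
    }

A thread that loses the race can return immediately, because the thread already waiting will process whatever events the loser would have handled.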
@@ -562,8 +593,9 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, L: Deref, CMH: Deref> P
        /// new_outbound_connection, however entries will only appear once the initial handshake has
        /// completed and we are sure the remote peer has the private key for the given node_id.
        pub fn get_peer_node_ids(&self) -> Vec<PublicKey> {
-               let peers = self.peers.lock().unwrap();
-               peers.peers.values().filter_map(|p| {
+               let peers = self.peers.read().unwrap();
+               peers.values().filter_map(|peer_mutex| {
+                       let p = peer_mutex.lock().unwrap();
                        if !p.channel_encryptor.is_ready_for_encryption() || p.their_features.is_none() {
                                return None;
                        }
@@ -599,8 +631,8 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, L: Deref, CMH: Deref> P
                let res = peer_encryptor.get_act_one().to_vec();
                let pending_read_buffer = [0; 50].to_vec(); // Noise act two is 50 bytes
 
-               let mut peers = self.peers.lock().unwrap();
-               if peers.peers.insert(descriptor, Peer {
+               let mut peers = self.peers.write().unwrap();
+               if peers.insert(descriptor, Mutex::new(Peer {
                        channel_encryptor: peer_encryptor,
                        their_node_id: None,
                        their_features: None,
@@ -619,7 +651,8 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, L: Deref, CMH: Deref> P
                        msgs_sent_since_pong: 0,
                        awaiting_pong_timer_tick_intervals: 0,
                        received_message_since_timer_tick: false,
-               }).is_some() {
+                       sent_gossip_timestamp_filter: false,
+               })).is_some() {
                        panic!("PeerManager driver duplicated descriptors!");
                };
                Ok(res)
@@ -645,8 +678,8 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, L: Deref, CMH: Deref> P
                let peer_encryptor = PeerChannelEncryptor::new_inbound(&self.our_node_secret);
                let pending_read_buffer = [0; 50].to_vec(); // Noise act one is 50 bytes
 
-               let mut peers = self.peers.lock().unwrap();
-               if peers.peers.insert(descriptor, Peer {
+               let mut peers = self.peers.write().unwrap();
+               if peers.insert(descriptor, Mutex::new(Peer {
                        channel_encryptor: peer_encryptor,
                        their_node_id: None,
                        their_features: None,
@@ -665,7 +698,8 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, L: Deref, CMH: Deref> P
                        msgs_sent_since_pong: 0,
                        awaiting_pong_timer_tick_intervals: 0,
                        received_message_since_timer_tick: false,
-               }).is_some() {
+                       sent_gossip_timestamp_filter: false,
+               })).is_some() {
                        panic!("PeerManager driver duplicated descriptors!");
                };
                Ok(())
@@ -755,17 +789,18 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, L: Deref, CMH: Deref> P
        /// [`send_data`]: SocketDescriptor::send_data
        /// [`write_buffer_space_avail`]: PeerManager::write_buffer_space_avail
        pub fn write_buffer_space_avail(&self, descriptor: &mut Descriptor) -> Result<(), PeerHandleError> {
-               let mut peers = self.peers.lock().unwrap();
-               match peers.peers.get_mut(descriptor) {
+               let peers = self.peers.read().unwrap();
+               match peers.get(descriptor) {
                        None => {
                                // This is most likely a simple race condition where the user found that the socket
                                // was writeable, then we told the user to `disconnect_socket()`, then they called
                                // this method. Return an error to make sure we get disconnected.
                                return Err(PeerHandleError { no_connection_possible: false });
                        },
-                       Some(peer) => {
+                       Some(peer_mutex) => {
+                               let mut peer = peer_mutex.lock().unwrap();
                                peer.awaiting_write_event = false;
-                               self.do_attempt_write_data(descriptor, peer);
+                               self.do_attempt_write_data(descriptor, &mut peer);
                        }
                };
                Ok(())
@@ -816,207 +851,218 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, L: Deref, CMH: Deref> P
        }
 
        fn do_read_event(&self, peer_descriptor: &mut Descriptor, data: &[u8]) -> Result<bool, PeerHandleError> {
-               let pause_read = {
-                       let mut peers_lock = self.peers.lock().unwrap();
-                       let peers = &mut *peers_lock;
-                       let mut msgs_to_forward = Vec::new();
-                       let mut peer_node_id = None;
-                       let pause_read = match peers.peers.get_mut(peer_descriptor) {
-                               None => {
-                                       // This is most likely a simple race condition where the user read some bytes
-                                       // from the socket, then we told the user to `disconnect_socket()`, then they
-                                       // called this method. Return an error to make sure we get disconnected.
-                                       return Err(PeerHandleError { no_connection_possible: false });
-                               },
-                               Some(peer) => {
+               let mut pause_read = false;
+               let peers = self.peers.read().unwrap();
+               let mut msgs_to_forward = Vec::new();
+               let mut peer_node_id = None;
+               match peers.get(peer_descriptor) {
+                       None => {
+                               // This is most likely a simple race condition where the user read some bytes
+                               // from the socket, then we told the user to `disconnect_socket()`, then they
+                               // called this method. Return an error to make sure we get disconnected.
+                               return Err(PeerHandleError { no_connection_possible: false });
+                       },
+                       Some(peer_mutex) => {
+                               let mut read_pos = 0;
+                               while read_pos < data.len() {
+                                       macro_rules! try_potential_handleerror {
+                                               ($peer: expr, $thing: expr) => {
+                                                       match $thing {
+                                                               Ok(x) => x,
+                                                               Err(e) => {
+                                                                       match e.action {
+                                                                               msgs::ErrorAction::DisconnectPeer { msg: _ } => {
+                                                                                       //TODO: Try to push msg
+                                                                                       log_debug!(self.logger, "Error handling message{}; disconnecting peer with: {}", OptionalFromDebugger(&peer_node_id), e.err);
+                                                                                       return Err(PeerHandleError{ no_connection_possible: false });
+                                                                               },
+                                                                               msgs::ErrorAction::IgnoreAndLog(level) => {
+                                                                                       log_given_level!(self.logger, level, "Error handling message{}; ignoring: {}", OptionalFromDebugger(&peer_node_id), e.err);
+                                                                                       continue
+                                                                               },
+                                                                               msgs::ErrorAction::IgnoreDuplicateGossip => continue, // Don't even bother logging these
+                                                                               msgs::ErrorAction::IgnoreError => {
+                                                                                       log_debug!(self.logger, "Error handling message{}; ignoring: {}", OptionalFromDebugger(&peer_node_id), e.err);
+                                                                                       continue;
+                                                                               },
+                                                                               msgs::ErrorAction::SendErrorMessage { msg } => {
+                                                                                       log_debug!(self.logger, "Error handling message{}; sending error message with: {}", OptionalFromDebugger(&peer_node_id), e.err);
+                                                                                       self.enqueue_message($peer, &msg);
+                                                                                       continue;
+                                                                               },
+                                                                               msgs::ErrorAction::SendWarningMessage { msg, log_level } => {
+                                                                                       log_given_level!(self.logger, log_level, "Error handling message{}; sending warning message with: {}", OptionalFromDebugger(&peer_node_id), e.err);
+                                                                                       self.enqueue_message($peer, &msg);
+                                                                                       continue;
+                                                                               },
+                                                                       }
+                                                               }
+                                                       }
+                                               }
+                                       }
+
+                                       let mut peer_lock = peer_mutex.lock().unwrap();
+                                       let peer = &mut *peer_lock;
+                                       let mut msg_to_handle = None;
+                                       if peer_node_id.is_none() {
+                                               peer_node_id = peer.their_node_id.clone();
+                                       }
+
                                        assert!(peer.pending_read_buffer.len() > 0);
                                        assert!(peer.pending_read_buffer.len() > peer.pending_read_buffer_pos);
 
-                                       let mut read_pos = 0;
-                                       while read_pos < data.len() {
-                                               {
-                                                       let data_to_copy = cmp::min(peer.pending_read_buffer.len() - peer.pending_read_buffer_pos, data.len() - read_pos);
-                                                       peer.pending_read_buffer[peer.pending_read_buffer_pos..peer.pending_read_buffer_pos + data_to_copy].copy_from_slice(&data[read_pos..read_pos + data_to_copy]);
-                                                       read_pos += data_to_copy;
-                                                       peer.pending_read_buffer_pos += data_to_copy;
+                                       {
+                                               let data_to_copy = cmp::min(peer.pending_read_buffer.len() - peer.pending_read_buffer_pos, data.len() - read_pos);
+                                               peer.pending_read_buffer[peer.pending_read_buffer_pos..peer.pending_read_buffer_pos + data_to_copy].copy_from_slice(&data[read_pos..read_pos + data_to_copy]);
+                                               read_pos += data_to_copy;
+                                               peer.pending_read_buffer_pos += data_to_copy;
+                                       }
+
+                                       if peer.pending_read_buffer_pos == peer.pending_read_buffer.len() {
+                                               peer.pending_read_buffer_pos = 0;
+
+                                               macro_rules! insert_node_id {
+                                                       () => {
+                                                               match self.node_id_to_descriptor.lock().unwrap().entry(peer.their_node_id.unwrap()) {
+                                                                       hash_map::Entry::Occupied(_) => {
+                                                                               log_trace!(self.logger, "Got second connection with {}, closing", log_pubkey!(peer.their_node_id.unwrap()));
+                                                                               peer.their_node_id = None; // Unset so that we don't generate a peer_disconnected event
+                                                                               return Err(PeerHandleError{ no_connection_possible: false })
+                                                                       },
+                                                                       hash_map::Entry::Vacant(entry) => {
+                                                                               log_debug!(self.logger, "Finished noise handshake for connection with {}", log_pubkey!(peer.their_node_id.unwrap()));
+                                                                               entry.insert(peer_descriptor.clone())
+                                                                       },
+                                                               };
+                                                       }
                                                }
 
-                                               if peer.pending_read_buffer_pos == peer.pending_read_buffer.len() {
-                                                       peer.pending_read_buffer_pos = 0;
+                                               let next_step = peer.channel_encryptor.get_noise_step();
+                                               match next_step {
+                                                       NextNoiseStep::ActOne => {
+                                                               let act_two = try_potential_handleerror!(peer,
+                                                                       peer.channel_encryptor.process_act_one_with_keys(&peer.pending_read_buffer[..], &self.our_node_secret, self.get_ephemeral_key())).to_vec();
+                                                               peer.pending_outbound_buffer.push_back(act_two);
+                                                               peer.pending_read_buffer = [0; 66].to_vec(); // act three is 66 bytes long
+                                                       },
+                                                       NextNoiseStep::ActTwo => {
+                                                               let (act_three, their_node_id) = try_potential_handleerror!(peer,
+                                                                       peer.channel_encryptor.process_act_two(&peer.pending_read_buffer[..], &self.our_node_secret));
+                                                               peer.pending_outbound_buffer.push_back(act_three.to_vec());
+                                                               peer.pending_read_buffer = [0; 18].to_vec(); // Message length header is 18 bytes
+                                                               peer.pending_read_is_header = true;
+
+                                                               peer.their_node_id = Some(their_node_id);
+                                                               insert_node_id!();
+                                                               let features = InitFeatures::known();
+                                                               let resp = msgs::Init { features, remote_network_address: filter_addresses(peer.their_net_address.clone()) };
+                                                               self.enqueue_message(peer, &resp);
+                                                               peer.awaiting_pong_timer_tick_intervals = 0;
+                                                       },
+                                                       NextNoiseStep::ActThree => {
+                                                               let their_node_id = try_potential_handleerror!(peer,
+                                                                       peer.channel_encryptor.process_act_three(&peer.pending_read_buffer[..]));
+                                                               peer.pending_read_buffer = [0; 18].to_vec(); // Message length header is 18 bytes
+                                                               peer.pending_read_is_header = true;
+                                                               peer.their_node_id = Some(their_node_id);
+                                                               insert_node_id!();
+                                                               let features = InitFeatures::known();
+                                                               let resp = msgs::Init { features, remote_network_address: filter_addresses(peer.their_net_address.clone()) };
+                                                               self.enqueue_message(peer, &resp);
+                                                               peer.awaiting_pong_timer_tick_intervals = 0;
+                                                       },
+                                                       NextNoiseStep::NoiseComplete => {
+                                                               if peer.pending_read_is_header {
+                                                                       let msg_len = try_potential_handleerror!(peer,
+                                                                               peer.channel_encryptor.decrypt_length_header(&peer.pending_read_buffer[..]));
+                                                                       if peer.pending_read_buffer.capacity() > 8192 { peer.pending_read_buffer = Vec::new(); }
+                                                                       peer.pending_read_buffer.resize(msg_len as usize + 16, 0);
+                                                                       if msg_len < 2 { // Need at least the message type tag
+                                                                               return Err(PeerHandleError{ no_connection_possible: false });
+                                                                       }
+                                                                       peer.pending_read_is_header = false;
+                                                               } else {
+                                                                       let msg_data = try_potential_handleerror!(peer,
+                                                                               peer.channel_encryptor.decrypt_message(&peer.pending_read_buffer[..]));
+                                                                       assert!(msg_data.len() >= 2);
+
+                                                                       // Reset read buffer
+                                                                       if peer.pending_read_buffer.capacity() > 8192 { peer.pending_read_buffer = Vec::new(); }
+                                                                       peer.pending_read_buffer.resize(18, 0);
+                                                                       peer.pending_read_is_header = true;
 
-                                                       macro_rules! try_potential_handleerror {
-                                                               ($thing: expr) => {
-                                                                       match $thing {
+                                                                       let mut reader = io::Cursor::new(&msg_data[..]);
+                                                                       let message_result = wire::read(&mut reader, &*self.custom_message_handler);
+                                                                       let message = match message_result {
                                                                                Ok(x) => x,
                                                                                Err(e) => {
-                                                                                       match e.action {
-                                                                                               msgs::ErrorAction::DisconnectPeer { msg: _ } => {
-                                                                                                       //TODO: Try to push msg
-                                                                                                       log_debug!(self.logger, "Error handling message{}; disconnecting peer with: {}", OptionalFromDebugger(&peer.their_node_id), e.err);
-                                                                                                       return Err(PeerHandleError{ no_connection_possible: false });
-                                                                                               },
-                                                                                               msgs::ErrorAction::IgnoreAndLog(level) => {
-                                                                                                       log_given_level!(self.logger, level, "Error handling message{}; ignoring: {}", OptionalFromDebugger(&peer.their_node_id), e.err);
-                                                                                                       continue
-                                                                                               },
-                                                                                               msgs::ErrorAction::IgnoreDuplicateGossip => continue, // Don't even bother logging these
-                                                                                               msgs::ErrorAction::IgnoreError => {
-                                                                                                       log_debug!(self.logger, "Error handling message{}; ignoring: {}", OptionalFromDebugger(&peer.their_node_id), e.err);
+                                                                                       match e {
+                                                                                               // Note that to avoid recursion we never call
+                                                                                               // `do_attempt_write_data` from here, so the
+                                                                                               // messages enqueued here are not actually
+                                                                                               // sent before the peer is disconnected.
+                                                                                               (msgs::DecodeError::UnknownRequiredFeature, Some(ty)) if is_gossip_msg(ty) => {
+                                                                                                       log_gossip!(self.logger, "Got a channel/node announcement with an unknown required feature flag, you may want to update!");
                                                                                                        continue;
-                                                                                               },
-                                                                                               msgs::ErrorAction::SendErrorMessage { msg } => {
-                                                                                                       log_debug!(self.logger, "Error handling message{}; sending error message with: {}", OptionalFromDebugger(&peer.their_node_id), e.err);
-                                                                                                       self.enqueue_message(peer, &msg);
+                                                                                               }
+                                                                                               (msgs::DecodeError::UnsupportedCompression, _) => {
+                                                                                                       log_gossip!(self.logger, "We don't support zlib-compressed message fields, sending a warning and ignoring message");
+                                                                                                       self.enqueue_message(peer, &msgs::WarningMessage { channel_id: [0; 32], data: "Unsupported message compression: zlib".to_owned() });
                                                                                                        continue;
-                                                                                               },
-                                                                                               msgs::ErrorAction::SendWarningMessage { msg, log_level } => {
-                                                                                                       log_given_level!(self.logger, log_level, "Error handling message{}; sending warning message with: {}", OptionalFromDebugger(&peer.their_node_id), e.err);
-                                                                                                       self.enqueue_message(peer, &msg);
+                                                                                               }
+                                                                                               (_, Some(ty)) if is_gossip_msg(ty) => {
+                                                                                                       log_gossip!(self.logger, "Got an invalid value while deserializing a gossip message");
+                                                                                                       self.enqueue_message(peer, &msgs::WarningMessage { channel_id: [0; 32], data: "Unreadable/bogus gossip message".to_owned() });
                                                                                                        continue;
-                                                                                               },
+                                                                                               }
+                                                                                               (msgs::DecodeError::UnknownRequiredFeature, ty) => {
+                                                                                                       log_gossip!(self.logger, "Received a message with an unknown required feature flag or TLV, you may want to update!");
+                                                                                                       self.enqueue_message(peer, &msgs::WarningMessage { channel_id: [0; 32], data: format!("Received an unknown required feature/TLV in message type {:?}", ty) });
+                                                                                                       return Err(PeerHandleError { no_connection_possible: false });
+                                                                                               }
+                                                                                               (msgs::DecodeError::UnknownVersion, _) => return Err(PeerHandleError { no_connection_possible: false }),
+                                                                                               (msgs::DecodeError::InvalidValue, _) => {
+                                                                                                       log_debug!(self.logger, "Got an invalid value while deserializing message");
+                                                                                                       return Err(PeerHandleError { no_connection_possible: false });
+                                                                                               }
+                                                                                               (msgs::DecodeError::ShortRead, _) => {
+                                                                                                       log_debug!(self.logger, "Deserialization failed due to shortness of message");
+                                                                                                       return Err(PeerHandleError { no_connection_possible: false });
+                                                                                               }
+                                                                                               (msgs::DecodeError::BadLengthDescriptor, _) => return Err(PeerHandleError { no_connection_possible: false }),
+                                                                                               (msgs::DecodeError::Io(_), _) => return Err(PeerHandleError { no_connection_possible: false }),
                                                                                        }
                                                                                }
-                                                                       }
-                                                               }
-                                                       }
-
-                                                       macro_rules! insert_node_id {
-                                                               () => {
-                                                                       match peers.node_id_to_descriptor.entry(peer.their_node_id.unwrap()) {
-                                                                               hash_map::Entry::Occupied(_) => {
-                                                                                       log_trace!(self.logger, "Got second connection with {}, closing", log_pubkey!(peer.their_node_id.unwrap()));
-                                                                                       peer.their_node_id = None; // Unset so that we don't generate a peer_disconnected event
-                                                                                       return Err(PeerHandleError{ no_connection_possible: false })
-                                                                               },
-                                                                               hash_map::Entry::Vacant(entry) => {
-                                                                                       log_debug!(self.logger, "Finished noise handshake for connection with {}", log_pubkey!(peer.their_node_id.unwrap()));
-                                                                                       entry.insert(peer_descriptor.clone())
-                                                                               },
                                                                        };
-                                                               }
-                                                       }
-
-                                                       let next_step = peer.channel_encryptor.get_noise_step();
-                                                       match next_step {
-                                                               NextNoiseStep::ActOne => {
-                                                                       let act_two = try_potential_handleerror!(peer.channel_encryptor.process_act_one_with_keys(&peer.pending_read_buffer[..], &self.our_node_secret, self.get_ephemeral_key())).to_vec();
-                                                                       peer.pending_outbound_buffer.push_back(act_two);
-                                                                       peer.pending_read_buffer = [0; 66].to_vec(); // act three is 66 bytes long
-                                                               },
-                                                               NextNoiseStep::ActTwo => {
-                                                                       let (act_three, their_node_id) = try_potential_handleerror!(peer.channel_encryptor.process_act_two(&peer.pending_read_buffer[..], &self.our_node_secret));
-                                                                       peer.pending_outbound_buffer.push_back(act_three.to_vec());
-                                                                       peer.pending_read_buffer = [0; 18].to_vec(); // Message length header is 18 bytes
-                                                                       peer.pending_read_is_header = true;
 
-                                                                       peer.their_node_id = Some(their_node_id);
-                                                                       insert_node_id!();
-                                                                       let features = InitFeatures::known();
-                                                                       let resp = msgs::Init { features, remote_network_address: filter_addresses(peer.their_net_address.clone())};
-                                                                       self.enqueue_message(peer, &resp);
-                                                                       peer.awaiting_pong_timer_tick_intervals = 0;
-                                                               },
-                                                               NextNoiseStep::ActThree => {
-                                                                       let their_node_id = try_potential_handleerror!(peer.channel_encryptor.process_act_three(&peer.pending_read_buffer[..]));
-                                                                       peer.pending_read_buffer = [0; 18].to_vec(); // Message length header is 18 bytes
-                                                                       peer.pending_read_is_header = true;
-                                                                       peer.their_node_id = Some(their_node_id);
-                                                                       insert_node_id!();
-                                                                       let features = InitFeatures::known();
-                                                                       let resp = msgs::Init { features, remote_network_address: filter_addresses(peer.their_net_address.clone())};
-                                                                       self.enqueue_message(peer, &resp);
-                                                                       peer.awaiting_pong_timer_tick_intervals = 0;
-                                                               },
-                                                               NextNoiseStep::NoiseComplete => {
-                                                                       if peer.pending_read_is_header {
-                                                                               let msg_len = try_potential_handleerror!(peer.channel_encryptor.decrypt_length_header(&peer.pending_read_buffer[..]));
-                                                                               peer.pending_read_buffer = Vec::with_capacity(msg_len as usize + 16);
-                                                                               peer.pending_read_buffer.resize(msg_len as usize + 16, 0);
-                                                                               if msg_len < 2 { // Need at least the message type tag
-                                                                                       return Err(PeerHandleError{ no_connection_possible: false });
-                                                                               }
-                                                                               peer.pending_read_is_header = false;
-                                                                       } else {
-                                                                               let msg_data = try_potential_handleerror!(peer.channel_encryptor.decrypt_message(&peer.pending_read_buffer[..]));
-                                                                               assert!(msg_data.len() >= 2);
-
-                                                                               // Reset read buffer
-                                                                               peer.pending_read_buffer = [0; 18].to_vec();
-                                                                               peer.pending_read_is_header = true;
-
-                                                                               let mut reader = io::Cursor::new(&msg_data[..]);
-                                                                               let message_result = wire::read(&mut reader, &*self.custom_message_handler);
-                                                                               let message = match message_result {
-                                                                                       Ok(x) => x,
-                                                                                       Err(e) => {
-                                                                                               match e {
-                                                                                                       // Note that to avoid recursion we never call
-                                                                                                       // `do_attempt_write_data` from here, causing
-                                                                                                       // the messages enqueued here to not actually
-                                                                                                       // be sent before the peer is disconnected.
-                                                                                                       (msgs::DecodeError::UnknownRequiredFeature, Some(ty)) if is_gossip_msg(ty) => {
-                                                                                                               log_gossip!(self.logger, "Got a channel/node announcement with an unknown required feature flag, you may want to update!");
-                                                                                                               continue;
-                                                                                                       }
-                                                                                                       (msgs::DecodeError::UnsupportedCompression, _) => {
-                                                                                                               log_gossip!(self.logger, "We don't support zlib-compressed message fields, sending a warning and ignoring message");
-                                                                                                               self.enqueue_message(peer, &msgs::WarningMessage { channel_id: [0; 32], data: "Unsupported message compression: zlib".to_owned() });
-                                                                                                               continue;
-                                                                                                       }
-                                                                                                       (_, Some(ty)) if is_gossip_msg(ty) => {
-                                                                                                               log_gossip!(self.logger, "Got an invalid value while deserializing a gossip message");
-                                                                                                               self.enqueue_message(peer, &msgs::WarningMessage { channel_id: [0; 32], data: "Unreadable/bogus gossip message".to_owned() });
-                                                                                                               continue;
-                                                                                                       }
-                                                                                                       (msgs::DecodeError::UnknownRequiredFeature, ty) => {
-                                                                                                               log_gossip!(self.logger, "Received a message with an unknown required feature flag or TLV, you may want to update!");
-                                                                                                               self.enqueue_message(peer, &msgs::WarningMessage { channel_id: [0; 32], data: format!("Received an unknown required feature/TLV in message type {:?}", ty) });
-                                                                                                               return Err(PeerHandleError { no_connection_possible: false });
-                                                                                                       }
-                                                                                                       (msgs::DecodeError::UnknownVersion, _) => return Err(PeerHandleError { no_connection_possible: false }),
-                                                                                                       (msgs::DecodeError::InvalidValue, _) => {
-                                                                                                               log_debug!(self.logger, "Got an invalid value while deserializing message");
-                                                                                                               return Err(PeerHandleError { no_connection_possible: false });
-                                                                                                       }
-                                                                                                       (msgs::DecodeError::ShortRead, _) => {
-                                                                                                               log_debug!(self.logger, "Deserialization failed due to shortness of message");
-                                                                                                               return Err(PeerHandleError { no_connection_possible: false });
-                                                                                                       }
-                                                                                                       (msgs::DecodeError::BadLengthDescriptor, _) => return Err(PeerHandleError { no_connection_possible: false }),
-                                                                                                       (msgs::DecodeError::Io(_), _) => return Err(PeerHandleError { no_connection_possible: false }),
-                                                                                               }
-                                                                                       }
-                                                                               };
-
-                                                                               match self.handle_message(peer, message) {
-                                                                                       Err(handling_error) => match handling_error {
-                                                                                               MessageHandlingError::PeerHandleError(e) => { return Err(e) },
-                                                                                               MessageHandlingError::LightningError(e) => {
-                                                                                                       try_potential_handleerror!(Err(e));
-                                                                                               },
-                                                                                       },
-                                                                                       Ok(Some(msg)) => {
-                                                                                               peer_node_id = Some(peer.their_node_id.expect("After noise is complete, their_node_id is always set"));
-                                                                                               msgs_to_forward.push(msg);
-                                                                                       },
-                                                                                       Ok(None) => {},
-                                                                               }
-                                                                       }
+                                                                       msg_to_handle = Some(message);
                                                                }
                                                        }
                                                }
                                        }
-
-                                       peer.pending_outbound_buffer.len() > OUTBOUND_BUFFER_LIMIT_READ_PAUSE // pause_read
+                                       pause_read = peer.pending_outbound_buffer.len() > OUTBOUND_BUFFER_LIMIT_READ_PAUSE;
+
+                                       if let Some(message) = msg_to_handle {
+                                               match self.handle_message(&peer_mutex, peer_lock, message) {
+                                                       Err(handling_error) => match handling_error {
+                                                               MessageHandlingError::PeerHandleError(e) => { return Err(e) },
+                                                               MessageHandlingError::LightningError(e) => {
+                                                                       try_potential_handleerror!(&mut peer_mutex.lock().unwrap(), Err(e));
+                                                               },
+                                                       },
+                                                       Ok(Some(msg)) => {
+                                                               msgs_to_forward.push(msg);
+                                                       },
+                                                       Ok(None) => {},
+                                               }
+                                       }
                                }
-                       };
-
-                       for msg in msgs_to_forward.drain(..) {
-                               self.forward_broadcast_msg(peers, &msg, peer_node_id.as_ref());
                        }
+               }
 
-                       pause_read
-               };
+               for msg in msgs_to_forward.drain(..) {
+                       self.forward_broadcast_msg(&*peers, &msg, peer_node_id.as_ref());
+               }
 
                Ok(pause_read)
        }
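
For context on the buffer sizing in the decrypt path above: BOLT 8 frames every post-handshake message as an 18-byte encrypted length header (a 2-byte big-endian length plus a 16-byte MAC), followed by the encrypted payload plus another 16-byte MAC. That is why the loop resizes `pending_read_buffer` to `msg_len + 16` once a header is decrypted and back to 18 bytes when it expects the next header. A minimal sketch of that arithmetic (the constants and helper below are illustrative only, not LDK API):

    /// 2-byte encrypted length + 16-byte MAC, per BOLT 8.
    const MSG_HEADER_LEN: usize = 18;
    /// MAC appended to every encrypted payload.
    const MSG_MAC_LEN: usize = 16;

    /// How many ciphertext bytes to wait for next, given what we are expecting.
    fn next_read_len(awaiting_header: bool, decrypted_len: u16) -> usize {
        if awaiting_header { MSG_HEADER_LEN } else { decrypted_len as usize + MSG_MAC_LEN }
    }

The capacity check against 8192 bytes in the hunk above frees oversized buffers while letting smaller ones be reused across messages, so the common small-message case stays allocation-free.
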
@@ -1025,51 +1071,74 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, L: Deref, CMH: Deref> P
        /// Returns the message back if it needs to be broadcasted to all other peers.
        fn handle_message(
                &self,
-               peer: &mut Peer,
+               peer_mutex: &Mutex<Peer>,
+               mut peer_lock: MutexGuard<Peer>,
                message: wire::Message<<<CMH as core::ops::Deref>::Target as wire::CustomMessageReader>::CustomMessage>
        ) -> Result<Option<wire::Message<<<CMH as core::ops::Deref>::Target as wire::CustomMessageReader>::CustomMessage>>, MessageHandlingError> {
-               if is_gossip_msg(message.type_id()) {
-                       log_gossip!(self.logger, "Received message {:?} from {}", message, log_pubkey!(peer.their_node_id.unwrap()));
-               } else {
-                       log_trace!(self.logger, "Received message {:?} from {}", message, log_pubkey!(peer.their_node_id.unwrap()));
-               }
-
-               peer.received_message_since_timer_tick = true;
+               let their_node_id = peer_lock.their_node_id.clone().expect("We know the peer's public key by the time we receive messages");
+               peer_lock.received_message_since_timer_tick = true;
 
                // Need an Init as first message
-               if let wire::Message::Init(_) = message {
-               } else if peer.their_features.is_none() {
-                       log_debug!(self.logger, "Peer {} sent non-Init first message", log_pubkey!(peer.their_node_id.unwrap()));
+               if let wire::Message::Init(msg) = message {
+                       if msg.features.requires_unknown_bits() {
+                               log_debug!(self.logger, "Peer features required unknown version bits");
+                               return Err(PeerHandleError{ no_connection_possible: true }.into());
+                       }
+                       if peer_lock.their_features.is_some() {
+                               return Err(PeerHandleError{ no_connection_possible: false }.into());
+                       }
+
+                       log_info!(self.logger, "Received peer Init message from {}: {}", log_pubkey!(their_node_id), msg.features);
+
+                       // For peers not supporting gossip queries, start sync now; otherwise wait until we receive a filter.
+                       if msg.features.initial_routing_sync() && !msg.features.supports_gossip_queries() {
+                               peer_lock.sync_status = InitSyncTracker::ChannelsSyncing(0);
+                       }
+
+                       if !msg.features.supports_static_remote_key() {
+                               log_debug!(self.logger, "Peer {} does not support static remote key, disconnecting with no_connection_possible", log_pubkey!(their_node_id));
+                               return Err(PeerHandleError{ no_connection_possible: true }.into());
+                       }
+
+                       self.message_handler.route_handler.peer_connected(&their_node_id, &msg);
+
+                       self.message_handler.chan_handler.peer_connected(&their_node_id, &msg);
+                       peer_lock.their_features = Some(msg.features);
+                       return Ok(None);
+               } else if peer_lock.their_features.is_none() {
+                       log_debug!(self.logger, "Peer {} sent non-Init first message", log_pubkey!(their_node_id));
                        return Err(PeerHandleError{ no_connection_possible: false }.into());
                }
 
-               let mut should_forward = None;
-
-               match message {
-                       // Setup and Control messages:
-                       wire::Message::Init(msg) => {
-                               if msg.features.requires_unknown_bits() {
-                                       log_debug!(self.logger, "Peer features required unknown version bits");
-                                       return Err(PeerHandleError{ no_connection_possible: true }.into());
-                               }
-                               if peer.their_features.is_some() {
-                                       return Err(PeerHandleError{ no_connection_possible: false }.into());
-                               }
+               if let wire::Message::GossipTimestampFilter(_msg) = message {
+                       // When supporting gossip messages, start initial gossip sync only after we receive
+                       // a GossipTimestampFilter
+                       if peer_lock.their_features.as_ref().unwrap().supports_gossip_queries() &&
+                               !peer_lock.sent_gossip_timestamp_filter {
+                               peer_lock.sent_gossip_timestamp_filter = true;
+                               peer_lock.sync_status = InitSyncTracker::ChannelsSyncing(0);
+                       }
+                       return Ok(None);
+               }
 
-                               log_info!(self.logger, "Received peer Init message from {}: {}", log_pubkey!(peer.their_node_id.unwrap()), msg.features);
+               let their_features = peer_lock.their_features.clone();
+               mem::drop(peer_lock);
 
-                               if msg.features.initial_routing_sync() {
-                                       peer.sync_status = InitSyncTracker::ChannelsSyncing(0);
-                               }
-                               if !msg.features.supports_static_remote_key() {
-                                       log_debug!(self.logger, "Peer {} does not support static remote key, disconnecting with no_connection_possible", log_pubkey!(peer.their_node_id.unwrap()));
-                                       return Err(PeerHandleError{ no_connection_possible: true }.into());
-                               }
+               if is_gossip_msg(message.type_id()) {
+                       log_gossip!(self.logger, "Received message {:?} from {}", message, log_pubkey!(their_node_id));
+               } else {
+                       log_trace!(self.logger, "Received message {:?} from {}", message, log_pubkey!(their_node_id));
+               }
 
-                               self.message_handler.route_handler.peer_connected(&peer.their_node_id.unwrap(), &msg);
+               let mut should_forward = None;
 
-                               self.message_handler.chan_handler.peer_connected(&peer.their_node_id.unwrap(), &msg);
-                               peer.their_features = Some(msg.features);
+               match message {
+                       // Setup and Control messages:
+                       wire::Message::Init(_) => {
+                               // Handled above
+                       },
+                       wire::Message::GossipTimestampFilter(_) => {
+                               // Handled above
                        },
                        wire::Message::Error(msg) => {
                                let mut data_is_printable = true;
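
The restructuring above makes `handle_message` take the peer's `Mutex` plus an already-held `MutexGuard`: stateful bookkeeping (marking a message as received, recording the `Init` features, arming the gossip sync) happens under the guard, which is then released via `mem::drop(peer_lock)` before the message is dispatched, so the per-peer lock is never held across handler callbacks. A condensed sketch of the pattern, with a hypothetical `PeerState` standing in for `Peer`:

    use std::sync::{Mutex, MutexGuard};

    struct PeerState { received_message_since_timer_tick: bool }

    fn handle(peer_mutex: &Mutex<PeerState>, mut guard: MutexGuard<'_, PeerState>) {
        guard.received_message_since_timer_tick = true; // state update under the lock
        std::mem::drop(guard);                          // release before dispatching
        // ...dispatch to chan_handler / route_handler here...
        let _peer = peer_mutex.lock().unwrap();         // re-lock only if a reply (e.g. a Pong) must be enqueued
    }
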
@@ -1081,11 +1150,11 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, L: Deref, CMH: Deref> P
                                }
 
                                if data_is_printable {
-                                       log_debug!(self.logger, "Got Err message from {}: {}", log_pubkey!(peer.their_node_id.unwrap()), msg.data);
+                                       log_debug!(self.logger, "Got Err message from {}: {}", log_pubkey!(their_node_id), msg.data);
                                } else {
-                                       log_debug!(self.logger, "Got Err message from {} with non-ASCII error message", log_pubkey!(peer.their_node_id.unwrap()));
+                                       log_debug!(self.logger, "Got Err message from {} with non-ASCII error message", log_pubkey!(their_node_id));
                                }
-                               self.message_handler.chan_handler.handle_error(&peer.their_node_id.unwrap(), &msg);
+                               self.message_handler.chan_handler.handle_error(&their_node_id, &msg);
                                if msg.channel_id == [0; 32] {
                                        return Err(PeerHandleError{ no_connection_possible: true }.into());
                                }
@@ -1100,78 +1169,79 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, L: Deref, CMH: Deref> P
                                }
 
                                if data_is_printable {
-                                       log_debug!(self.logger, "Got warning message from {}: {}", log_pubkey!(peer.their_node_id.unwrap()), msg.data);
+                                       log_debug!(self.logger, "Got warning message from {}: {}", log_pubkey!(their_node_id), msg.data);
                                } else {
-                                       log_debug!(self.logger, "Got warning message from {} with non-ASCII error message", log_pubkey!(peer.their_node_id.unwrap()));
+                                       log_debug!(self.logger, "Got warning message from {} with non-ASCII error message", log_pubkey!(their_node_id));
                                }
                        },
 
                        wire::Message::Ping(msg) => {
                                if msg.ponglen < 65532 {
                                        let resp = msgs::Pong { byteslen: msg.ponglen };
-                                       self.enqueue_message(peer, &resp);
+                                       self.enqueue_message(&mut *peer_mutex.lock().unwrap(), &resp);
                                }
                        },
                        wire::Message::Pong(_msg) => {
-                               peer.awaiting_pong_timer_tick_intervals = 0;
-                               peer.msgs_sent_since_pong = 0;
+                               let mut peer_lock = peer_mutex.lock().unwrap();
+                               peer_lock.awaiting_pong_timer_tick_intervals = 0;
+                               peer_lock.msgs_sent_since_pong = 0;
                        },
 
                        // Channel messages:
                        wire::Message::OpenChannel(msg) => {
-                               self.message_handler.chan_handler.handle_open_channel(&peer.their_node_id.unwrap(), peer.their_features.clone().unwrap(), &msg);
+                               self.message_handler.chan_handler.handle_open_channel(&their_node_id, their_features.clone().unwrap(), &msg);
                        },
                        wire::Message::AcceptChannel(msg) => {
-                               self.message_handler.chan_handler.handle_accept_channel(&peer.their_node_id.unwrap(), peer.their_features.clone().unwrap(), &msg);
+                               self.message_handler.chan_handler.handle_accept_channel(&their_node_id, their_features.clone().unwrap(), &msg);
                        },
 
                        wire::Message::FundingCreated(msg) => {
-                               self.message_handler.chan_handler.handle_funding_created(&peer.their_node_id.unwrap(), &msg);
+                               self.message_handler.chan_handler.handle_funding_created(&their_node_id, &msg);
                        },
                        wire::Message::FundingSigned(msg) => {
-                               self.message_handler.chan_handler.handle_funding_signed(&peer.their_node_id.unwrap(), &msg);
+                               self.message_handler.chan_handler.handle_funding_signed(&their_node_id, &msg);
                        },
                        wire::Message::FundingLocked(msg) => {
-                               self.message_handler.chan_handler.handle_funding_locked(&peer.their_node_id.unwrap(), &msg);
+                               self.message_handler.chan_handler.handle_funding_locked(&their_node_id, &msg);
                        },
 
                        wire::Message::Shutdown(msg) => {
-                               self.message_handler.chan_handler.handle_shutdown(&peer.their_node_id.unwrap(), peer.their_features.as_ref().unwrap(), &msg);
+                               self.message_handler.chan_handler.handle_shutdown(&their_node_id, their_features.as_ref().unwrap(), &msg);
                        },
                        wire::Message::ClosingSigned(msg) => {
-                               self.message_handler.chan_handler.handle_closing_signed(&peer.their_node_id.unwrap(), &msg);
+                               self.message_handler.chan_handler.handle_closing_signed(&their_node_id, &msg);
                        },
 
                        // Commitment messages:
                        wire::Message::UpdateAddHTLC(msg) => {
-                               self.message_handler.chan_handler.handle_update_add_htlc(&peer.their_node_id.unwrap(), &msg);
+                               self.message_handler.chan_handler.handle_update_add_htlc(&their_node_id, &msg);
                        },
                        wire::Message::UpdateFulfillHTLC(msg) => {
-                               self.message_handler.chan_handler.handle_update_fulfill_htlc(&peer.their_node_id.unwrap(), &msg);
+                               self.message_handler.chan_handler.handle_update_fulfill_htlc(&their_node_id, &msg);
                        },
                        wire::Message::UpdateFailHTLC(msg) => {
-                               self.message_handler.chan_handler.handle_update_fail_htlc(&peer.their_node_id.unwrap(), &msg);
+                               self.message_handler.chan_handler.handle_update_fail_htlc(&their_node_id, &msg);
                        },
                        wire::Message::UpdateFailMalformedHTLC(msg) => {
-                               self.message_handler.chan_handler.handle_update_fail_malformed_htlc(&peer.their_node_id.unwrap(), &msg);
+                               self.message_handler.chan_handler.handle_update_fail_malformed_htlc(&their_node_id, &msg);
                        },
 
                        wire::Message::CommitmentSigned(msg) => {
-                               self.message_handler.chan_handler.handle_commitment_signed(&peer.their_node_id.unwrap(), &msg);
+                               self.message_handler.chan_handler.handle_commitment_signed(&their_node_id, &msg);
                        },
                        wire::Message::RevokeAndACK(msg) => {
-                               self.message_handler.chan_handler.handle_revoke_and_ack(&peer.their_node_id.unwrap(), &msg);
+                               self.message_handler.chan_handler.handle_revoke_and_ack(&their_node_id, &msg);
                        },
                        wire::Message::UpdateFee(msg) => {
-                               self.message_handler.chan_handler.handle_update_fee(&peer.their_node_id.unwrap(), &msg);
+                               self.message_handler.chan_handler.handle_update_fee(&their_node_id, &msg);
                        },
                        wire::Message::ChannelReestablish(msg) => {
-                               self.message_handler.chan_handler.handle_channel_reestablish(&peer.their_node_id.unwrap(), &msg);
+                               self.message_handler.chan_handler.handle_channel_reestablish(&their_node_id, &msg);
                        },
 
                        // Routing messages:
                        wire::Message::AnnouncementSignatures(msg) => {
-                               self.message_handler.chan_handler.handle_announcement_signatures(&peer.their_node_id.unwrap(), &msg);
+                               self.message_handler.chan_handler.handle_announcement_signatures(&their_node_id, &msg);
                        },
                        wire::Message::ChannelAnnouncement(msg) => {
                                if self.message_handler.route_handler.handle_channel_announcement(&msg)
@@ -1186,26 +1256,23 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, L: Deref, CMH: Deref> P
                                }
                        },
                        wire::Message::ChannelUpdate(msg) => {
-                               self.message_handler.chan_handler.handle_channel_update(&peer.their_node_id.unwrap(), &msg);
+                               self.message_handler.chan_handler.handle_channel_update(&their_node_id, &msg);
                                if self.message_handler.route_handler.handle_channel_update(&msg)
                                                .map_err(|e| -> MessageHandlingError { e.into() })? {
                                        should_forward = Some(wire::Message::ChannelUpdate(msg));
                                }
                        },
                        wire::Message::QueryShortChannelIds(msg) => {
-                               self.message_handler.route_handler.handle_query_short_channel_ids(&peer.their_node_id.unwrap(), msg)?;
+                               self.message_handler.route_handler.handle_query_short_channel_ids(&their_node_id, msg)?;
                        },
                        wire::Message::ReplyShortChannelIdsEnd(msg) => {
-                               self.message_handler.route_handler.handle_reply_short_channel_ids_end(&peer.their_node_id.unwrap(), msg)?;
+                               self.message_handler.route_handler.handle_reply_short_channel_ids_end(&their_node_id, msg)?;
                        },
                        wire::Message::QueryChannelRange(msg) => {
-                               self.message_handler.route_handler.handle_query_channel_range(&peer.their_node_id.unwrap(), msg)?;
+                               self.message_handler.route_handler.handle_query_channel_range(&their_node_id, msg)?;
                        },
                        wire::Message::ReplyChannelRange(msg) => {
-                               self.message_handler.route_handler.handle_reply_channel_range(&peer.their_node_id.unwrap(), msg)?;
-                       },
-                       wire::Message::GossipTimestampFilter(_msg) => {
-                               // TODO: handle message
+                               self.message_handler.route_handler.handle_reply_channel_range(&their_node_id, msg)?;
                        },
 
                        // Unknown messages:
@@ -1218,19 +1285,20 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, L: Deref, CMH: Deref> P
                                log_trace!(self.logger, "Received unknown odd message of type {}, ignoring", type_id);
                        },
                        wire::Message::Custom(custom) => {
-                               self.custom_message_handler.handle_custom_message(custom, &peer.their_node_id.unwrap())?;
+                               self.custom_message_handler.handle_custom_message(custom, &their_node_id)?;
                        },
                };
                Ok(should_forward)
        }
 
-       fn forward_broadcast_msg(&self, peers: &mut PeerHolder<Descriptor>, msg: &wire::Message<<<CMH as core::ops::Deref>::Target as wire::CustomMessageReader>::CustomMessage>, except_node: Option<&PublicKey>) {
+       fn forward_broadcast_msg(&self, peers: &HashMap<Descriptor, Mutex<Peer>>, msg: &wire::Message<<<CMH as core::ops::Deref>::Target as wire::CustomMessageReader>::CustomMessage>, except_node: Option<&PublicKey>) {
                match msg {
                        wire::Message::ChannelAnnouncement(ref msg) => {
                                log_gossip!(self.logger, "Sending message to all peers except {:?} or the announced channel's counterparties: {:?}", except_node, msg);
                                let encoded_msg = encode_msg!(msg);
 
-                               for (_, peer) in peers.peers.iter_mut() {
+                               for (_, peer_mutex) in peers.iter() {
+                                       let mut peer = peer_mutex.lock().unwrap();
                                        if !peer.channel_encryptor.is_ready_for_encryption() || peer.their_features.is_none() ||
                                                        !peer.should_forward_channel_announcement(msg.contents.short_channel_id) {
                                                continue
@@ -1248,14 +1316,15 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, L: Deref, CMH: Deref> P
                                        if except_node.is_some() && peer.their_node_id.as_ref() == except_node {
                                                continue;
                                        }
-                                       self.enqueue_encoded_message(peer, &encoded_msg);
+                                       self.enqueue_encoded_message(&mut *peer, &encoded_msg);
                                }
                        },
                        wire::Message::NodeAnnouncement(ref msg) => {
                                log_gossip!(self.logger, "Sending message to all peers except {:?} or the announced node: {:?}", except_node, msg);
                                let encoded_msg = encode_msg!(msg);
 
-                               for (_, peer) in peers.peers.iter_mut() {
+                               for (_, peer_mutex) in peers.iter() {
+                                       let mut peer = peer_mutex.lock().unwrap();
                                        if !peer.channel_encryptor.is_ready_for_encryption() || peer.their_features.is_none() ||
                                                        !peer.should_forward_node_announcement(msg.contents.node_id) {
                                                continue
@@ -1272,14 +1341,15 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, L: Deref, CMH: Deref> P
                                        if except_node.is_some() && peer.their_node_id.as_ref() == except_node {
                                                continue;
                                        }
-                                       self.enqueue_encoded_message(peer, &encoded_msg);
+                                       self.enqueue_encoded_message(&mut *peer, &encoded_msg);
                                }
                        },
                        wire::Message::ChannelUpdate(ref msg) => {
                                log_gossip!(self.logger, "Sending message to all peers except {:?}: {:?}", except_node, msg);
                                let encoded_msg = encode_msg!(msg);
 
-                               for (_, peer) in peers.peers.iter_mut() {
+                               for (_, peer_mutex) in peers.iter() {
+                                       let mut peer = peer_mutex.lock().unwrap();
                                        if !peer.channel_encryptor.is_ready_for_encryption() || peer.their_features.is_none() ||
                                                        !peer.should_forward_channel_announcement(msg.contents.short_channel_id)  {
                                                continue
@@ -1293,7 +1363,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, L: Deref, CMH: Deref> P
                                        if except_node.is_some() && peer.their_node_id.as_ref() == except_node {
                                                continue;
                                        }
-                                       self.enqueue_encoded_message(peer, &encoded_msg);
+                                       self.enqueue_encoded_message(&mut *peer, &encoded_msg);
                                }
                        },
                        _ => debug_assert!(false, "We shouldn't attempt to forward anything but gossip messages"),
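
With the peer set now stored as a map of per-peer `Mutex`es, `forward_broadcast_msg` locks each peer only long enough to append the pre-encoded message to its outbound buffer, skipping peers that are not yet ready or that originated the gossip. A sketch of that loop under simplified, hypothetical types:

    use std::collections::HashMap;
    use std::sync::Mutex;

    struct Peer { ready_for_encryption: bool, outbound: Vec<Vec<u8>> }

    fn broadcast(peers: &HashMap<u64, Mutex<Peer>>, encoded_msg: &[u8], except: Option<u64>) {
        for (descriptor, peer_mutex) in peers.iter() {
            if Some(*descriptor) == except { continue; }  // don't echo gossip back to its source
            let mut peer = peer_mutex.lock().unwrap();
            if !peer.ready_for_encryption { continue; }   // handshake or Init not finished yet
            peer.outbound.push(encoded_msg.to_vec());     // enqueue the already-encoded message
        }
    }

Encoding the message once before the loop (as the real code does with `encode_msg!`) keeps each per-peer critical section down to a single buffer push.
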
@@ -1310,31 +1380,67 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, L: Deref, CMH: Deref> P
        /// You don't have to call this function explicitly if you are using [`lightning-net-tokio`]
        /// or one of the other clients provided in our language bindings.
        ///
+       /// Note that if there are any other calls to this function waiting on lock(s), this may return
+       /// without doing any work. All available events that need handling will be handled before the
+       /// other calls return.
+       ///
        /// [`send_payment`]: crate::ln::channelmanager::ChannelManager::send_payment
        /// [`ChannelManager::process_pending_htlc_forwards`]: crate::ln::channelmanager::ChannelManager::process_pending_htlc_forwards
        /// [`send_data`]: SocketDescriptor::send_data
        pub fn process_events(&self) {
+               let mut _single_processor_lock = self.event_processing_lock.try_lock();
+               if _single_processor_lock.is_err() {
+                       // While we could wake the older sleeper here with a CV and make more even waiting
+                       // times, that would be a lot of overengineering for a simple "reduce total waiter
+                       // count" goal.
+                       match self.blocked_event_processors.compare_exchange(false, true, Ordering::AcqRel, Ordering::Acquire) {
+                               Err(val) => {
+                                       debug_assert!(val, "compare_exchange failed spuriously?");
+                                       return;
+                               },
+                               Ok(val) => {
+                                       debug_assert!(!val, "compare_exchange succeeded spuriously?");
+                                       // We're the only waiter, as the running process_events may have emptied the
+                                                                       // pending events "long" ago and there are new events for us to process; wait until
+                                                                       // it's done and process any leftover events before returning.
+                                       _single_processor_lock = Ok(self.event_processing_lock.lock().unwrap());
+                                       self.blocked_event_processors.store(false, Ordering::Release);
+                               }
+                       }
+               }
+
+               let mut peers_to_disconnect = HashMap::new();
+               let mut events_generated = self.message_handler.chan_handler.get_and_clear_pending_msg_events();
+               events_generated.append(&mut self.message_handler.route_handler.get_and_clear_pending_msg_events());
+
                {
                        // TODO: There are some DoS attacks here where you can flood someone's outbound send
                        // buffer by doing things like announcing channels on another node. We should be willing to
                        // drop optional-ish messages when send buffers get full!
 
-                       let mut peers_lock = self.peers.lock().unwrap();
-                       let mut events_generated = self.message_handler.chan_handler.get_and_clear_pending_msg_events();
-                       events_generated.append(&mut self.message_handler.route_handler.get_and_clear_pending_msg_events());
-                       let peers = &mut *peers_lock;
+                       let peers_lock = self.peers.read().unwrap();
+                       let peers = &*peers_lock;
                        macro_rules! get_peer_for_forwarding {
                                ($node_id: expr) => {
                                        {
-                                               match peers.node_id_to_descriptor.get($node_id) {
-                                                       Some(descriptor) => match peers.peers.get_mut(&descriptor) {
-                                                               Some(peer) => {
-                                                                       if peer.their_features.is_none() {
+                                               if peers_to_disconnect.get($node_id).is_some() {
+                                                       // If we've "disconnected" this peer, do not send to it.
+                                                       continue;
+                                               }
+                                               let descriptor_opt = self.node_id_to_descriptor.lock().unwrap().get($node_id).cloned();
+                                               match descriptor_opt {
+                                                       Some(descriptor) => match peers.get(&descriptor) {
+                                                               Some(peer_mutex) => {
+                                                                       let peer_lock = peer_mutex.lock().unwrap();
+                                                                       if peer_lock.their_features.is_none() {
                                                                                continue;
                                                                        }
-                                                                       peer
+                                                                       peer_lock
                                                                },
-                                                               None => panic!("Inconsistent peers set state!"),
+                                                               None => {
+                                                                       debug_assert!(false, "Inconsistent peers set state!");
+                                                                       continue;
+                                                               }
                                                        },
                                                        None => {
                                                                continue;
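
The new gating at the top of `process_events` ensures at most one thread processes events at a time while allowing at most one further caller to queue behind it; any additional callers return immediately, since the queued waiter will pick up their events once the running call finishes. A condensed sketch of that logic, using a hypothetical `EventGate` in place of the `event_processing_lock`/`blocked_event_processors` pair:

    use std::sync::{Mutex, MutexGuard};
    use std::sync::atomic::{AtomicBool, Ordering};

    struct EventGate { lock: Mutex<()>, waiter_queued: AtomicBool }

    impl EventGate {
        // Some(guard): this call should process events.
        // None: another call is running and a waiter is already queued behind it.
        fn enter(&self) -> Option<MutexGuard<'_, ()>> {
            if let Ok(guard) = self.lock.try_lock() { return Some(guard); }
            match self.waiter_queued.compare_exchange(false, true, Ordering::AcqRel, Ordering::Acquire) {
                Err(_) => None,
                Ok(_) => {
                    let guard = self.lock.lock().unwrap(); // block until the running call finishes
                    self.waiter_queued.store(false, Ordering::Release);
                    Some(guard)
                }
            }
        }
    }
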
@@ -1349,13 +1455,13 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, L: Deref, CMH: Deref> P
                                                log_debug!(self.logger, "Handling SendAcceptChannel event in peer_handler for node {} for channel {}",
                                                                log_pubkey!(node_id),
                                                                log_bytes!(msg.temporary_channel_id));
-                                               self.enqueue_message(get_peer_for_forwarding!(node_id), msg);
+                                               self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                        },
                                        MessageSendEvent::SendOpenChannel { ref node_id, ref msg } => {
                                                log_debug!(self.logger, "Handling SendOpenChannel event in peer_handler for node {} for channel {}",
                                                                log_pubkey!(node_id),
                                                                log_bytes!(msg.temporary_channel_id));
-                                               self.enqueue_message(get_peer_for_forwarding!(node_id), msg);
+                                               self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                        },
                                        MessageSendEvent::SendFundingCreated { ref node_id, ref msg } => {
                                                log_debug!(self.logger, "Handling SendFundingCreated event in peer_handler for node {} for channel {} (which becomes {})",
@@ -1364,25 +1470,25 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, L: Deref, CMH: Deref> P
                                                                log_funding_channel_id!(msg.funding_txid, msg.funding_output_index));
                                                // TODO: If the peer is gone we should generate a DiscardFunding event
                                                // indicating to the wallet that they should just throw away this funding transaction
-                                               self.enqueue_message(get_peer_for_forwarding!(node_id), msg);
+                                               self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                        },
                                        MessageSendEvent::SendFundingSigned { ref node_id, ref msg } => {
                                                log_debug!(self.logger, "Handling SendFundingSigned event in peer_handler for node {} for channel {}",
                                                                log_pubkey!(node_id),
                                                                log_bytes!(msg.channel_id));
-                                               self.enqueue_message(get_peer_for_forwarding!(node_id), msg);
+                                               self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                        },
                                        MessageSendEvent::SendFundingLocked { ref node_id, ref msg } => {
                                                log_debug!(self.logger, "Handling SendFundingLocked event in peer_handler for node {} for channel {}",
                                                                log_pubkey!(node_id),
                                                                log_bytes!(msg.channel_id));
-                                               self.enqueue_message(get_peer_for_forwarding!(node_id), msg);
+                                               self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                        },
                                        MessageSendEvent::SendAnnouncementSignatures { ref node_id, ref msg } => {
                                                log_debug!(self.logger, "Handling SendAnnouncementSignatures event in peer_handler for node {} for channel {})",
                                                                log_pubkey!(node_id),
                                                                log_bytes!(msg.channel_id));
-                                               self.enqueue_message(get_peer_for_forwarding!(node_id), msg);
+                                               self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                        },
                                        MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
                                                log_debug!(self.logger, "Handling UpdateHTLCs event in peer_handler for node {} with {} adds, {} fulfills, {} fails for channel {}",
@@ -1391,47 +1497,47 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, L: Deref, CMH: Deref> P
                                                                update_fulfill_htlcs.len(),
                                                                update_fail_htlcs.len(),
                                                                log_bytes!(commitment_signed.channel_id));
-                                               let peer = get_peer_for_forwarding!(node_id);
+                                               let mut peer = get_peer_for_forwarding!(node_id);
                                                for msg in update_add_htlcs {
-                                                       self.enqueue_message(peer, msg);
+                                                       self.enqueue_message(&mut *peer, msg);
                                                }
                                                for msg in update_fulfill_htlcs {
-                                                       self.enqueue_message(peer, msg);
+                                                       self.enqueue_message(&mut *peer, msg);
                                                }
                                                for msg in update_fail_htlcs {
-                                                       self.enqueue_message(peer, msg);
+                                                       self.enqueue_message(&mut *peer, msg);
                                                }
                                                for msg in update_fail_malformed_htlcs {
-                                                       self.enqueue_message(peer, msg);
+                                                       self.enqueue_message(&mut *peer, msg);
                                                }
                                                if let &Some(ref msg) = update_fee {
-                                                       self.enqueue_message(peer, msg);
+                                                       self.enqueue_message(&mut *peer, msg);
                                                }
-                                               self.enqueue_message(peer, commitment_signed);
+                                               self.enqueue_message(&mut *peer, commitment_signed);
                                        },
                                        MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
                                                log_debug!(self.logger, "Handling SendRevokeAndACK event in peer_handler for node {} for channel {}",
                                                                log_pubkey!(node_id),
                                                                log_bytes!(msg.channel_id));
-                                               self.enqueue_message(get_peer_for_forwarding!(node_id), msg);
+                                               self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                        },
                                        MessageSendEvent::SendClosingSigned { ref node_id, ref msg } => {
                                                log_debug!(self.logger, "Handling SendClosingSigned event in peer_handler for node {} for channel {}",
                                                                log_pubkey!(node_id),
                                                                log_bytes!(msg.channel_id));
-                                               self.enqueue_message(get_peer_for_forwarding!(node_id), msg);
+                                               self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                        },
                                        MessageSendEvent::SendShutdown { ref node_id, ref msg } => {
                                                log_debug!(self.logger, "Handling Shutdown event in peer_handler for node {} for channel {}",
                                                                log_pubkey!(node_id),
                                                                log_bytes!(msg.channel_id));
-                                               self.enqueue_message(get_peer_for_forwarding!(node_id), msg);
+                                               self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                        },
                                        MessageSendEvent::SendChannelReestablish { ref node_id, ref msg } => {
                                                log_debug!(self.logger, "Handling SendChannelReestablish event in peer_handler for node {} for channel {}",
                                                                log_pubkey!(node_id),
                                                                log_bytes!(msg.channel_id));
-                                               self.enqueue_message(get_peer_for_forwarding!(node_id), msg);
+                                               self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                        },
                                        MessageSendEvent::BroadcastChannelAnnouncement { msg, update_msg } => {
                                                log_debug!(self.logger, "Handling BroadcastChannelAnnouncement event in peer_handler for short channel id {}", msg.contents.short_channel_id);
@@ -1465,29 +1571,15 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, L: Deref, CMH: Deref> P
                                        MessageSendEvent::SendChannelUpdate { ref node_id, ref msg } => {
                                                log_trace!(self.logger, "Handling SendChannelUpdate event in peer_handler for node {} for channel {}",
                                                                log_pubkey!(node_id), msg.contents.short_channel_id);
-                                               let peer = get_peer_for_forwarding!(node_id);
-                                               peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(&encode_msg!(msg)));
+                                               self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                        },
                                        MessageSendEvent::HandleError { ref node_id, ref action } => {
                                                match *action {
                                                        msgs::ErrorAction::DisconnectPeer { ref msg } => {
-                                                               if let Some(mut descriptor) = peers.node_id_to_descriptor.remove(node_id) {
-                                                                       if let Some(mut peer) = peers.peers.remove(&descriptor) {
-                                                                               if let Some(ref msg) = *msg {
-                                                                                       log_trace!(self.logger, "Handling DisconnectPeer HandleError event in peer_handler for node {} with message {}",
-                                                                                                       log_pubkey!(node_id),
-                                                                                                       msg.data);
-                                                                                       self.enqueue_message(&mut peer, msg);
-                                                                                       // This isn't guaranteed to work, but if there is enough free
-                                                                                       // room in the send buffer, put the error message there...
-                                                                                       self.do_attempt_write_data(&mut descriptor, &mut peer);
-                                                                               } else {
-                                                                                       log_gossip!(self.logger, "Handling DisconnectPeer HandleError event in peer_handler for node {} with no message", log_pubkey!(node_id));
-                                                                               }
-                                                                       }
-                                                                       descriptor.disconnect_socket();
-                                                                       self.message_handler.chan_handler.peer_disconnected(&node_id, false);
-                                                               }
+                                                               // We do not have the peers write lock, so we just store that we're
+                                                               // about to disconnect the peer and do it after we finish
+                                                               // processing most messages.
+                                                               peers_to_disconnect.insert(*node_id, msg.clone());
                                                        },
                                                        msgs::ErrorAction::IgnoreAndLog(level) => {
                                                                log_given_level!(self.logger, level, "Received a HandleError event to be ignored for node {}", log_pubkey!(node_id));
@@ -1500,21 +1592,21 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, L: Deref, CMH: Deref> P
                                                                log_trace!(self.logger, "Handling SendErrorMessage HandleError event in peer_handler for node {} with message {}",
                                                                                log_pubkey!(node_id),
                                                                                msg.data);
-                                                               self.enqueue_message(get_peer_for_forwarding!(node_id), msg);
+                                                               self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                                        },
                                                        msgs::ErrorAction::SendWarningMessage { ref msg, ref log_level } => {
                                                                log_given_level!(self.logger, *log_level, "Handling SendWarningMessage HandleError event in peer_handler for node {} with message {}",
                                                                                log_pubkey!(node_id),
                                                                                msg.data);
-                                                               self.enqueue_message(get_peer_for_forwarding!(node_id), msg);
+                                                               self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                                        },
                                                }
                                        },
                                        MessageSendEvent::SendChannelRangeQuery { ref node_id, ref msg } => {
-                                               self.enqueue_message(get_peer_for_forwarding!(node_id), msg);
+                                               self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                        },
                                        MessageSendEvent::SendShortIdsQuery { ref node_id, ref msg } => {
-                                               self.enqueue_message(get_peer_for_forwarding!(node_id), msg);
+                                               self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                        }
                                        MessageSendEvent::SendReplyChannelRange { ref node_id, ref msg } => {
                                                log_gossip!(self.logger, "Handling SendReplyChannelRange event in peer_handler for node {} with num_scids={} first_blocknum={} number_of_blocks={}, sync_complete={}",
@@ -1523,20 +1615,50 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, L: Deref, CMH: Deref> P
                                                        msg.first_blocknum,
                                                        msg.number_of_blocks,
                                                        msg.sync_complete);
-                                               self.enqueue_message(get_peer_for_forwarding!(node_id), msg);
+                                               self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                        }
                                        MessageSendEvent::SendGossipTimestampFilter { ref node_id, ref msg } => {
-                                               self.enqueue_message(get_peer_for_forwarding!(node_id), msg);
+                                               self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                        }
                                }
                        }
 
                        for (node_id, msg) in self.custom_message_handler.get_and_clear_pending_msg() {
-                               self.enqueue_message(get_peer_for_forwarding!(&node_id), &msg);
+                               if peers_to_disconnect.get(&node_id).is_some() { continue; }
+                               self.enqueue_message(&mut *get_peer_for_forwarding!(&node_id), &msg);
                        }
 
-                       for (descriptor, peer) in peers.peers.iter_mut() {
-                               self.do_attempt_write_data(&mut (*descriptor).clone(), peer);
+                       for (descriptor, peer_mutex) in peers.iter() {
+                               self.do_attempt_write_data(&mut (*descriptor).clone(), &mut *peer_mutex.lock().unwrap());
+                       }
+               }
+               if !peers_to_disconnect.is_empty() {
+                       let mut peers_lock = self.peers.write().unwrap();
+                       let peers = &mut *peers_lock;
+                       for (node_id, msg) in peers_to_disconnect.drain() {
+                               // Note that since we are holding the peers *write* lock we can
+                               // remove from node_id_to_descriptor immediately (as no other
+                               // thread can be holding the peer lock if we have the global write
+                               // lock).
+
+                               if let Some(mut descriptor) = self.node_id_to_descriptor.lock().unwrap().remove(&node_id) {
+                                       if let Some(peer_mutex) = peers.remove(&descriptor) {
+                                               if let Some(msg) = msg {
+                                                       log_trace!(self.logger, "Handling DisconnectPeer HandleError event in peer_handler for node {} with message {}",
+                                                                       log_pubkey!(node_id),
+                                                                       msg.data);
+                                                       let mut peer = peer_mutex.lock().unwrap();
+                                                       self.enqueue_message(&mut *peer, &msg);
+                                                       // This isn't guaranteed to work, but if there is enough free
+                                                       // room in the send buffer, put the error message there...
+                                                       self.do_attempt_write_data(&mut descriptor, &mut *peer);
+                                               } else {
+                                                       log_trace!(self.logger, "Handling DisconnectPeer HandleError event in peer_handler for node {} with no message", log_pubkey!(node_id));
+                                               }
+                                       }
+                                       descriptor.disconnect_socket();
+                                       self.message_handler.chan_handler.peer_disconnected(&node_id, false);
+                               }
                        }
                }
        }
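
The restructuring above moves the peer map behind a `RwLock`, wraps each peer in its own `Mutex`, and keeps `node_id_to_descriptor` in a separate `Mutex`, so event processing only needs the read lock plus per-peer locks; peers flagged for disconnection are collected into `peers_to_disconnect` and removed afterwards under the write lock, with handler callbacks run outside the iteration. A minimal sketch of that locking pattern, using simplified stand-in types rather than the real `Peer`/`SocketDescriptor` structs:

    use std::collections::HashMap;
    use std::sync::{Mutex, RwLock};

    struct Peer { outbound_buffer: Vec<Vec<u8>>, misbehaved: bool }

    struct Manager {
        // Read lock for routine sends; write lock only to add or remove peers.
        peers: RwLock<HashMap<u64, Mutex<Peer>>>,
    }

    impl Manager {
        fn process_events(&self) {
            let mut peers_to_disconnect = Vec::new();
            {
                let peers = self.peers.read().unwrap();
                for (descriptor, peer_mutex) in peers.iter() {
                    let mut peer = peer_mutex.lock().unwrap();
                    if peer.misbehaved {
                        // Cannot remove from the map under a read guard; defer it.
                        peers_to_disconnect.push(*descriptor);
                        continue;
                    }
                    peer.outbound_buffer.push(b"msg".to_vec());
                }
            } // read guard dropped before the write lock is taken
            if !peers_to_disconnect.is_empty() {
                let mut peers = self.peers.write().unwrap();
                for descriptor in peers_to_disconnect {
                    peers.remove(&descriptor);
                }
            }
        }
    }
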
@@ -1547,24 +1669,22 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, L: Deref, CMH: Deref> P
        }
 
        fn disconnect_event_internal(&self, descriptor: &Descriptor, no_connection_possible: bool) {
-               let mut peers = self.peers.lock().unwrap();
-               let peer_option = peers.peers.remove(descriptor);
+               let mut peers = self.peers.write().unwrap();
+               let peer_option = peers.remove(descriptor);
                match peer_option {
                        None => {
                                // This is most likely a simple race condition where the user found that the socket
                                // was disconnected, then we told the user to `disconnect_socket()`, then they
                                // called this method. Either way we're disconnected, return.
                        },
-                       Some(peer) => {
-                               match peer.their_node_id {
-                                       Some(node_id) => {
-                                               log_trace!(self.logger,
-                                                       "Handling disconnection of peer {}, with {}future connection to the peer possible.",
-                                                       log_pubkey!(node_id), if no_connection_possible { "no " } else { "" });
-                                               peers.node_id_to_descriptor.remove(&node_id);
-                                               self.message_handler.chan_handler.peer_disconnected(&node_id, no_connection_possible);
-                                       },
-                                       None => {}
+                       Some(peer_lock) => {
+                               let peer = peer_lock.lock().unwrap();
+                               if let Some(node_id) = peer.their_node_id {
+                                       log_trace!(self.logger,
+                                               "Handling disconnection of peer {}, with {}future connection to the peer possible.",
+                                               log_pubkey!(node_id), if no_connection_possible { "no " } else { "" });
+                                       self.node_id_to_descriptor.lock().unwrap().remove(&node_id);
+                                       self.message_handler.chan_handler.peer_disconnected(&node_id, no_connection_possible);
                                }
                        }
                };
@@ -1580,10 +1700,10 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, L: Deref, CMH: Deref> P
        ///
        /// [`disconnect_socket`]: SocketDescriptor::disconnect_socket
        pub fn disconnect_by_node_id(&self, node_id: PublicKey, no_connection_possible: bool) {
-               let mut peers_lock = self.peers.lock().unwrap();
-               if let Some(mut descriptor) = peers_lock.node_id_to_descriptor.remove(&node_id) {
+               let mut peers_lock = self.peers.write().unwrap();
+               if let Some(mut descriptor) = self.node_id_to_descriptor.lock().unwrap().remove(&node_id) {
                        log_trace!(self.logger, "Disconnecting peer with id {} due to client request", node_id);
-                       peers_lock.peers.remove(&descriptor);
+                       peers_lock.remove(&descriptor);
                        self.message_handler.chan_handler.peer_disconnected(&node_id, no_connection_possible);
                        descriptor.disconnect_socket();
                }
@@ -1593,17 +1713,16 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, L: Deref, CMH: Deref> P
        /// an indication that TCP sockets have stalled even if we weren't around to time them out
        /// using regular ping/pongs.
        pub fn disconnect_all_peers(&self) {
-               let mut peers_lock = self.peers.lock().unwrap();
+               let mut peers_lock = self.peers.write().unwrap();
+               self.node_id_to_descriptor.lock().unwrap().clear();
                let peers = &mut *peers_lock;
-               for (mut descriptor, peer) in peers.peers.drain() {
-                       if let Some(node_id) = peer.their_node_id {
+               for (mut descriptor, peer) in peers.drain() {
+                       if let Some(node_id) = peer.lock().unwrap().their_node_id {
                                log_trace!(self.logger, "Disconnecting peer with id {} due to client request to disconnect all peers", node_id);
-                               peers.node_id_to_descriptor.remove(&node_id);
                                self.message_handler.chan_handler.peer_disconnected(&node_id, false);
                        }
                        descriptor.disconnect_socket();
                }
-               debug_assert!(peers.node_id_to_descriptor.is_empty());
        }
 
        /// This is called when we're blocked on sending additional gossip messages until we receive a
@@ -1632,57 +1751,44 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, L: Deref, CMH: Deref> P
        ///
        /// [`send_data`]: SocketDescriptor::send_data
        pub fn timer_tick_occurred(&self) {
-               let mut peers_lock = self.peers.lock().unwrap();
+               let mut descriptors_needing_disconnect = Vec::new();
                {
-                       let peers = &mut *peers_lock;
-                       let node_id_to_descriptor = &mut peers.node_id_to_descriptor;
-                       let peers = &mut peers.peers;
-                       let mut descriptors_needing_disconnect = Vec::new();
-                       let peer_count = peers.len();
+                       let peers_lock = self.peers.read().unwrap();
 
-                       peers.retain(|descriptor, peer| {
-                               let mut do_disconnect_peer = false;
+                       for (descriptor, peer_mutex) in peers_lock.iter() {
+                               let mut peer = peer_mutex.lock().unwrap();
                                if !peer.channel_encryptor.is_ready_for_encryption() || peer.their_node_id.is_none() {
                                        // The peer needs to complete its handshake before we can exchange messages. We
                                        // give peers one timer tick to complete handshake, reusing
                                        // `awaiting_pong_timer_tick_intervals` to track number of timer ticks taken
                                        // for handshake completion.
                                        if peer.awaiting_pong_timer_tick_intervals != 0 {
-                                               do_disconnect_peer = true;
+                                               descriptors_needing_disconnect.push(descriptor.clone());
                                        } else {
                                                peer.awaiting_pong_timer_tick_intervals = 1;
-                                               return true;
                                        }
+                                       continue;
                                }
 
                                if peer.awaiting_pong_timer_tick_intervals == -1 {
                                        // Magic value set in `maybe_send_extra_ping`.
                                        peer.awaiting_pong_timer_tick_intervals = 1;
                                        peer.received_message_since_timer_tick = false;
-                                       return true;
+                                       continue;
                                }
 
-                               if do_disconnect_peer
-                                       || (peer.awaiting_pong_timer_tick_intervals > 0 && !peer.received_message_since_timer_tick)
+                               if (peer.awaiting_pong_timer_tick_intervals > 0 && !peer.received_message_since_timer_tick)
                                        || peer.awaiting_pong_timer_tick_intervals as u64 >
-                                               MAX_BUFFER_DRAIN_TICK_INTERVALS_PER_PEER as u64 * peer_count as u64
+                                               MAX_BUFFER_DRAIN_TICK_INTERVALS_PER_PEER as u64 * peers_lock.len() as u64
                                {
                                        descriptors_needing_disconnect.push(descriptor.clone());
-                                       match peer.their_node_id {
-                                               Some(node_id) => {
-                                                       log_trace!(self.logger, "Disconnecting peer with id {} due to ping timeout", node_id);
-                                                       node_id_to_descriptor.remove(&node_id);
-                                                       self.message_handler.chan_handler.peer_disconnected(&node_id, false);
-                                               }
-                                               None => {},
-                                       }
-                                       return false;
+                                       continue;
                                }
                                peer.received_message_since_timer_tick = false;
 
                                if peer.awaiting_pong_timer_tick_intervals > 0 {
                                        peer.awaiting_pong_timer_tick_intervals += 1;
-                                       return true;
+                                       continue;
                                }
 
                                peer.awaiting_pong_timer_tick_intervals = 1;
@@ -1690,11 +1796,24 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, L: Deref, CMH: Deref> P
                                        ponglen: 0,
                                        byteslen: 64,
                                };
-                               self.enqueue_message(peer, &ping);
+                               self.enqueue_message(&mut *peer, &ping);
                                self.do_attempt_write_data(&mut (descriptor.clone()), &mut *peer);
+                       }
+               }
 
-                               true
-                       });
+               if !descriptors_needing_disconnect.is_empty() {
+                       {
+                               let mut peers_lock = self.peers.write().unwrap();
+                               for descriptor in descriptors_needing_disconnect.iter() {
+                                       if let Some(peer) = peers_lock.remove(descriptor) {
+                                               if let Some(node_id) = peer.lock().unwrap().their_node_id {
+                                                       log_trace!(self.logger, "Disconnecting peer with id {} due to ping timeout", node_id);
+                                                       self.node_id_to_descriptor.lock().unwrap().remove(&node_id);
+                                                       self.message_handler.chan_handler.peer_disconnected(&node_id, false);
+                                               }
+                                       }
+                               }
+                       }
 
                        for mut descriptor in descriptors_needing_disconnect.drain(..) {
                                descriptor.disconnect_socket();
@@ -1707,7 +1826,11 @@ fn is_gossip_msg(type_id: u16) -> bool {
        match type_id {
                msgs::ChannelAnnouncement::TYPE |
                msgs::ChannelUpdate::TYPE |
-               msgs::NodeAnnouncement::TYPE => true,
+               msgs::NodeAnnouncement::TYPE |
+               msgs::QueryChannelRange::TYPE |
+               msgs::ReplyChannelRange::TYPE |
+               msgs::QueryShortChannelIds::TYPE |
+               msgs::ReplyShortChannelIdsEnd::TYPE => true,
                _ => false
        }
 }
@@ -1715,13 +1838,13 @@ fn is_gossip_msg(type_id: u16) -> bool {
 #[cfg(test)]
 mod tests {
        use ln::peer_handler::{PeerManager, MessageHandler, SocketDescriptor, IgnoringMessageHandler, filter_addresses};
-       use ln::msgs;
+       use ln::{msgs, wire};
        use ln::msgs::NetAddress;
        use util::events;
        use util::test_utils;
 
        use bitcoin::secp256k1::Secp256k1;
-       use bitcoin::secp256k1::key::{SecretKey, PublicKey};
+       use bitcoin::secp256k1::{SecretKey, PublicKey};
 
        use prelude::*;
        use sync::{Arc, Mutex};
@@ -1799,6 +1922,8 @@ mod tests {
                assert_eq!(peer_b.read_event(&mut fd_b, &fd_a.outbound_data.lock().unwrap().split_off(0)).unwrap(), false);
                peer_b.process_events();
                assert_eq!(peer_a.read_event(&mut fd_a, &fd_b.outbound_data.lock().unwrap().split_off(0)).unwrap(), false);
+               peer_a.process_events();
+               assert_eq!(peer_b.read_event(&mut fd_b, &fd_a.outbound_data.lock().unwrap().split_off(0)).unwrap(), false);
                (fd_a.clone(), fd_b.clone())
        }
 
@@ -1810,7 +1935,7 @@ mod tests {
                let chan_handler = test_utils::TestChannelMessageHandler::new();
                let mut peers = create_network(2, &cfgs);
                establish_connection(&peers[0], &peers[1]);
-               assert_eq!(peers[0].peers.lock().unwrap().peers.len(), 1);
+               assert_eq!(peers[0].peers.read().unwrap().len(), 1);
 
                let secp_ctx = Secp256k1::new();
                let their_id = PublicKey::from_secret_key(&secp_ctx, &peers[1].our_node_secret);
@@ -1823,7 +1948,49 @@ mod tests {
                peers[0].message_handler.chan_handler = &chan_handler;
 
                peers[0].process_events();
-               assert_eq!(peers[0].peers.lock().unwrap().peers.len(), 0);
+               assert_eq!(peers[0].peers.read().unwrap().len(), 0);
+       }
+
+       #[test]
+       fn test_send_simple_msg() {
+               // Simple test which builds a network of PeerManagers, connects them, brings them to
+               // NoiseState::Finished, and pushes a message from one peer to another.
+               let cfgs = create_peermgr_cfgs(2);
+               let a_chan_handler = test_utils::TestChannelMessageHandler::new();
+               let b_chan_handler = test_utils::TestChannelMessageHandler::new();
+               let mut peers = create_network(2, &cfgs);
+               let (fd_a, mut fd_b) = establish_connection(&peers[0], &peers[1]);
+               assert_eq!(peers[0].peers.read().unwrap().len(), 1);
+
+               let secp_ctx = Secp256k1::new();
+               let their_id = PublicKey::from_secret_key(&secp_ctx, &peers[1].our_node_secret);
+
+               let msg = msgs::Shutdown { channel_id: [42; 32], scriptpubkey: bitcoin::Script::new() };
+               a_chan_handler.pending_events.lock().unwrap().push(events::MessageSendEvent::SendShutdown {
+                       node_id: their_id, msg: msg.clone()
+               });
+               peers[0].message_handler.chan_handler = &a_chan_handler;
+
+               b_chan_handler.expect_receive_msg(wire::Message::Shutdown(msg));
+               peers[1].message_handler.chan_handler = &b_chan_handler;
+
+               peers[0].process_events();
+
+               let a_data = fd_a.outbound_data.lock().unwrap().split_off(0);
+               assert_eq!(peers[1].read_event(&mut fd_b, &a_data).unwrap(), false);
+       }
+
+       #[test]
+       fn test_disconnect_all_peer() {
+               // Simple test which builds a network of PeerManagers, connects them, brings them to
+               // NoiseState::Finished, and then calls disconnect_all_peers.
+               let cfgs = create_peermgr_cfgs(2);
+               let peers = create_network(2, &cfgs);
+               establish_connection(&peers[0], &peers[1]);
+               assert_eq!(peers[0].peers.read().unwrap().len(), 1);
+
+               peers[0].disconnect_all_peers();
+               assert_eq!(peers[0].peers.read().unwrap().len(), 0);
        }
 
        #[test]
@@ -1832,17 +1999,17 @@ mod tests {
                let cfgs = create_peermgr_cfgs(2);
                let peers = create_network(2, &cfgs);
                establish_connection(&peers[0], &peers[1]);
-               assert_eq!(peers[0].peers.lock().unwrap().peers.len(), 1);
+               assert_eq!(peers[0].peers.read().unwrap().len(), 1);
 
                // peers[0] awaiting_pong is set to true, but the Peer is still connected
                peers[0].timer_tick_occurred();
                peers[0].process_events();
-               assert_eq!(peers[0].peers.lock().unwrap().peers.len(), 1);
+               assert_eq!(peers[0].peers.read().unwrap().len(), 1);
 
                // Since timer_tick_occurred() is called again when awaiting_pong is true, all Peers are disconnected
                peers[0].timer_tick_occurred();
                peers[0].process_events();
-               assert_eq!(peers[0].peers.lock().unwrap().peers.len(), 0);
+               assert_eq!(peers[0].peers.read().unwrap().len(), 0);
        }
 
        #[test]
@@ -1862,21 +2029,21 @@ mod tests {
                let (mut fd_a, mut fd_b) = establish_connection(&peers[0], &peers[1]);
 
                // Make each peer to read the messages that the other peer just wrote to them. Note that
-               // due to the max-messagse-before-ping limits this may take a few iterations to complete.
+               // due to the max-message-before-ping limits this may take a few iterations to complete.
                for _ in 0..150/super::BUFFER_DRAIN_MSGS_PER_TICK + 1 {
-                       peers[0].process_events();
-                       let b_read_data = fd_a.outbound_data.lock().unwrap().split_off(0);
-                       assert!(!b_read_data.is_empty());
-
-                       peers[1].read_event(&mut fd_b, &b_read_data).unwrap();
                        peers[1].process_events();
-
                        let a_read_data = fd_b.outbound_data.lock().unwrap().split_off(0);
                        assert!(!a_read_data.is_empty());
+
                        peers[0].read_event(&mut fd_a, &a_read_data).unwrap();
+                       peers[0].process_events();
 
-                       peers[1].process_events();
-                       assert_eq!(fd_b.outbound_data.lock().unwrap().len(), 0, "Until B receives data, it shouldn't send more messages");
+                       let b_read_data = fd_a.outbound_data.lock().unwrap().split_off(0);
+                       assert!(!b_read_data.is_empty());
+                       peers[1].read_event(&mut fd_b, &b_read_data).unwrap();
+
+                       peers[0].process_events();
+                       assert_eq!(fd_a.outbound_data.lock().unwrap().len(), 0, "Until A receives data, it shouldn't send more messages");
                }
 
                // Check that each peer has received the expected number of channel updates and channel
@@ -1904,9 +2071,9 @@ mod tests {
                peers[0].new_inbound_connection(fd_a.clone(), None).unwrap();
 
                // If we get a single timer tick before completion, that's fine
-               assert_eq!(peers[0].peers.lock().unwrap().peers.len(), 1);
+               assert_eq!(peers[0].peers.read().unwrap().len(), 1);
                peers[0].timer_tick_occurred();
-               assert_eq!(peers[0].peers.lock().unwrap().peers.len(), 1);
+               assert_eq!(peers[0].peers.read().unwrap().len(), 1);
 
                assert_eq!(peers[0].read_event(&mut fd_a, &initial_data).unwrap(), false);
                peers[0].process_events();
@@ -1915,7 +2082,7 @@ mod tests {
 
                // ...but if we get a second timer tick, we should disconnect the peer
                peers[0].timer_tick_occurred();
-               assert_eq!(peers[0].peers.lock().unwrap().peers.len(), 0);
+               assert_eq!(peers[0].peers.read().unwrap().len(), 0);
 
                assert!(peers[0].read_event(&mut fd_a, &fd_b.outbound_data.lock().unwrap().split_off(0)).is_err());
        }
index fa44a0655c1462cdf321fede0aeb0de83141626b..d7b940eb90e09dfce8e6557656327440fb987159 100644 (file)
@@ -19,7 +19,8 @@ use routing::network_graph::RoutingFees;
 use routing::router::{PaymentParameters, RouteHint, RouteHintHop};
 use ln::features::{InitFeatures, InvoiceFeatures};
 use ln::msgs;
-use ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, OptionalField};
+use ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, OptionalField, ChannelUpdate};
+use ln::wire::Encode;
 use util::enforcing_trait_impls::EnforcingSigner;
 use util::events::{Event, MessageSendEvent, MessageSendEventsProvider};
 use util::config::UserConfig;
@@ -528,12 +529,17 @@ fn test_scid_alias_returned() {
                excess_data: Vec::new(),
        };
        let msg_hash = Sha256dHash::hash(&contents.encode()[..]);
-       let signature = Secp256k1::new().sign(&hash_to_message!(&msg_hash[..]), &nodes[1].keys_manager.get_node_secret(Recipient::Node).unwrap());
+       let signature = Secp256k1::new().sign_ecdsa(&hash_to_message!(&msg_hash[..]), &nodes[1].keys_manager.get_node_secret(Recipient::Node).unwrap());
        let msg = msgs::ChannelUpdate { signature, contents };
 
+       let mut err_data = Vec::new();
+       err_data.extend_from_slice(&(msg.serialized_length() as u16 + 2).to_be_bytes());
+       err_data.extend_from_slice(&ChannelUpdate::TYPE.to_be_bytes());
+       err_data.extend_from_slice(&msg.encode());
+
        expect_payment_failed_conditions!(nodes[0], payment_hash, false,
                PaymentFailedConditions::new().blamed_scid(last_hop[0].inbound_scid_alias.unwrap())
-                       .blamed_chan_closed(false).expected_htlc_error_data(0x1000|7, &msg.encode_with_len()));
+                       .blamed_chan_closed(false).expected_htlc_error_data(0x1000|7, &err_data));
 
        route.paths[0][1].fee_msat = 10_000; // Reset to the correct payment amount
        route.paths[0][0].fee_msat = 0; // But set fee paid to the middle hop to 0
@@ -551,7 +557,9 @@ fn test_scid_alias_returned() {
 
        let mut err_data = Vec::new();
        err_data.extend_from_slice(&10_000u64.to_be_bytes());
-       err_data.extend_from_slice(&msg.encode_with_len());
+       err_data.extend_from_slice(&(msg.serialized_length() as u16 + 2).to_be_bytes());
+       err_data.extend_from_slice(&ChannelUpdate::TYPE.to_be_bytes());
+       err_data.extend_from_slice(&msg.encode());
        expect_payment_failed_conditions!(nodes[0], payment_hash, false,
                PaymentFailedConditions::new().blamed_scid(last_hop[0].inbound_scid_alias.unwrap())
                        .blamed_chan_closed(false).expected_htlc_error_data(0x1000|12, &err_data));
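
The two `err_data` blocks in this test reflect that the onion failure data for these UPDATE-flagged errors now carries the enclosed `channel_update` prefixed by a two-byte length and the two-byte message type, rather than the old `encode_with_len()` output; the 0x1000|12 case additionally leads with the 8-byte HTLC amount. A stand-alone sketch of that layout, with a hypothetical helper and a placeholder byte vector instead of a real serialized update:

    // Hypothetical helper mirroring the construction in the test above.
    fn failure_data_with_update(serialized_update: &[u8]) -> Vec<u8> {
        let mut err_data = Vec::new();
        // 2-byte big-endian length covering the type field plus the update...
        err_data.extend_from_slice(&((serialized_update.len() + 2) as u16).to_be_bytes());
        // ...then the 2-byte channel_update message type (258)...
        err_data.extend_from_slice(&258u16.to_be_bytes());
        // ...then the serialized update itself.
        err_data.extend_from_slice(serialized_update);
        err_data
    }

    fn main() {
        let fake_update = vec![0u8; 10]; // stand-in for msg.encode()
        let data = failure_data_with_update(&fake_update);
        assert_eq!(&data[..2], &12u16.to_be_bytes());
        assert_eq!(&data[2..4], &258u16.to_be_bytes());
        assert_eq!(data.len(), 14);
    }
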
index de4026c1771c9dca855e739376fadcb7e57b60ac..8a4ec2dc3518a4d9ca67ae7040c500563f7d9e3c 100644 (file)
@@ -209,14 +209,14 @@ fn do_test_unconf_chan(reload_node: bool, reorg_after_reload: bool, use_funding_
                        let relevant_txids = nodes[0].node.get_relevant_txids();
                        assert_eq!(&relevant_txids[..], &[chan.3.txid()]);
                        nodes[0].node.transaction_unconfirmed(&relevant_txids[0]);
+               } else if connect_style == ConnectStyle::FullBlockViaListen {
+                       disconnect_blocks(&nodes[0], CHAN_CONFIRM_DEPTH - 1);
+                       assert_eq!(nodes[0].node.list_usable_channels().len(), 1);
+                       disconnect_blocks(&nodes[0], 1);
                } else {
                        disconnect_all_blocks(&nodes[0]);
                }
-               if connect_style == ConnectStyle::FullBlockViaListen && !use_funding_unconfirmed {
-                       handle_announce_close_broadcast_events(&nodes, 0, 1, true, "Channel closed because of an exception: Funding transaction was un-confirmed. Locked at 6 confs, now have 2 confs.");
-               } else {
-                       handle_announce_close_broadcast_events(&nodes, 0, 1, true, "Channel closed because of an exception: Funding transaction was un-confirmed. Locked at 6 confs, now have 0 confs.");
-               }
+               handle_announce_close_broadcast_events(&nodes, 0, 1, true, "Channel closed because of an exception: Funding transaction was un-confirmed. Locked at 6 confs, now have 0 confs.");
                check_added_monitors!(nodes[1], 1);
                {
                        let channel_state = nodes[0].node.channel_state.lock().unwrap();
@@ -277,14 +277,14 @@ fn do_test_unconf_chan(reload_node: bool, reorg_after_reload: bool, use_funding_
                        let relevant_txids = nodes[0].node.get_relevant_txids();
                        assert_eq!(&relevant_txids[..], &[chan.3.txid()]);
                        nodes[0].node.transaction_unconfirmed(&relevant_txids[0]);
+               } else if connect_style == ConnectStyle::FullBlockViaListen {
+                       disconnect_blocks(&nodes[0], CHAN_CONFIRM_DEPTH - 1);
+                       assert_eq!(nodes[0].node.list_channels().len(), 1);
+                       disconnect_blocks(&nodes[0], 1);
                } else {
                        disconnect_all_blocks(&nodes[0]);
                }
-               if connect_style == ConnectStyle::FullBlockViaListen && !use_funding_unconfirmed {
-                       handle_announce_close_broadcast_events(&nodes, 0, 1, true, "Channel closed because of an exception: Funding transaction was un-confirmed. Locked at 6 confs, now have 2 confs.");
-               } else {
-                       handle_announce_close_broadcast_events(&nodes, 0, 1, true, "Channel closed because of an exception: Funding transaction was un-confirmed. Locked at 6 confs, now have 0 confs.");
-               }
+               handle_announce_close_broadcast_events(&nodes, 0, 1, true, "Channel closed because of an exception: Funding transaction was un-confirmed. Locked at 6 confs, now have 0 confs.");
                check_added_monitors!(nodes[1], 1);
                {
                        let channel_state = nodes[0].node.channel_state.lock().unwrap();
@@ -297,11 +297,7 @@ fn do_test_unconf_chan(reload_node: bool, reorg_after_reload: bool, use_funding_
        *nodes[0].chain_monitor.expect_channel_force_closed.lock().unwrap() = Some((chan.2, true));
        nodes[0].node.test_process_background_events(); // Required to free the pending background monitor update
        check_added_monitors!(nodes[0], 1);
-       let expected_err = if connect_style == ConnectStyle::FullBlockViaListen && !use_funding_unconfirmed {
-               "Funding transaction was un-confirmed. Locked at 6 confs, now have 2 confs."
-       } else {
-               "Funding transaction was un-confirmed. Locked at 6 confs, now have 0 confs."
-       };
+       let expected_err = "Funding transaction was un-confirmed. Locked at 6 confs, now have 0 confs.";
        check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyForceClosed { peer_msg: "Channel closed because of an exception: ".to_owned() + expected_err });
        check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: expected_err.to_owned() });
        assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1);
index a9f44bae1aec79199230d467f426b50934c225dd..7abe3060fa7b666f22fc83048ca330a194e3fdfb 100644 (file)
@@ -4,14 +4,14 @@ use bitcoin::blockdata::opcodes::all::OP_PUSHBYTES_0 as SEGWIT_V0;
 use bitcoin::blockdata::script::{Builder, Script};
 use bitcoin::hashes::Hash;
 use bitcoin::hash_types::{WPubkeyHash, WScriptHash};
-use bitcoin::secp256k1::key::PublicKey;
+use bitcoin::secp256k1::PublicKey;
+use bitcoin::util::address::WitnessVersion;
 
 use ln::features::InitFeatures;
 use ln::msgs::DecodeError;
 use util::ser::{Readable, Writeable, Writer};
 
 use core::convert::TryFrom;
-use core::num::NonZeroU8;
 use io;
 
 /// A script pubkey for shutting down a channel as defined by [BOLT #2].
@@ -68,12 +68,12 @@ impl ShutdownScript {
 
        /// Generates a P2WPKH script pubkey from the given [`WPubkeyHash`].
        pub fn new_p2wpkh(pubkey_hash: &WPubkeyHash) -> Self {
-               Self(ShutdownScriptImpl::Bolt2(Script::new_v0_wpkh(pubkey_hash)))
+               Self(ShutdownScriptImpl::Bolt2(Script::new_v0_p2wpkh(pubkey_hash)))
        }
 
        /// Generates a P2WSH script pubkey from the given [`WScriptHash`].
        pub fn new_p2wsh(script_hash: &WScriptHash) -> Self {
-               Self(ShutdownScriptImpl::Bolt2(Script::new_v0_wsh(script_hash)))
+               Self(ShutdownScriptImpl::Bolt2(Script::new_v0_p2wsh(script_hash)))
        }
 
        /// Generates a witness script pubkey from the given segwit version and program.
@@ -84,9 +84,9 @@ impl ShutdownScript {
        /// # Errors
        ///
        /// This function may return an error if `program` is invalid for the segwit `version`.
-       pub fn new_witness_program(version: NonZeroU8, program: &[u8]) -> Result<Self, InvalidShutdownScript> {
+       pub fn new_witness_program(version: WitnessVersion, program: &[u8]) -> Result<Self, InvalidShutdownScript> {
                let script = Builder::new()
-                       .push_int(version.get().into())
+                       .push_int(version as i64)
                        .push_slice(&program)
                        .into_script();
                Self::try_from(script)
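
With the rust-bitcoin upgrade, the segwit version argument is the library's `WitnessVersion` enum rather than a `NonZeroU8`, which also removes the need for a separate invalid-version check (the `fails_from_invalid_segwit_version` test deleted further down). A rough usage sketch, assuming the crate paths used elsewhere in this patch:

    use bitcoin::util::address::WitnessVersion;
    use lightning::ln::script::ShutdownScript;

    fn main() {
        // A 40-byte version-16 witness program, as in the tests below; only
        // acceptable to peers advertising option_shutdown_anysegwit.
        let shutdown_script = ShutdownScript::new_witness_program(WitnessVersion::V16, &[0; 40])
            .expect("2-40 byte non-v0 witness programs should be accepted");
        let script_pubkey: bitcoin::Script = shutdown_script.into();
        assert!(script_pubkey.is_witness_program());
    }
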
@@ -156,7 +156,7 @@ impl Into<Script> for ShutdownScript {
        fn into(self) -> Script {
                match self.0 {
                        ShutdownScriptImpl::Legacy(pubkey) =>
-                               Script::new_v0_wpkh(&WPubkeyHash::hash(&pubkey.serialize())),
+                               Script::new_v0_p2wpkh(&WPubkeyHash::hash(&pubkey.serialize())),
                        ShutdownScriptImpl::Bolt2(script_pubkey) => script_pubkey,
                }
        }
@@ -174,19 +174,18 @@ impl core::fmt::Display for ShutdownScript{
 #[cfg(test)]
 mod shutdown_script_tests {
        use super::ShutdownScript;
-       use bitcoin::bech32::u5;
        use bitcoin::blockdata::opcodes;
        use bitcoin::blockdata::script::{Builder, Script};
        use bitcoin::secp256k1::Secp256k1;
-       use bitcoin::secp256k1::key::{PublicKey, SecretKey};
+       use bitcoin::secp256k1::{PublicKey, SecretKey};
        use ln::features::InitFeatures;
        use core::convert::TryFrom;
-       use core::num::NonZeroU8;
+       use bitcoin::util::address::WitnessVersion;
 
-       fn pubkey() -> bitcoin::util::ecdsa::PublicKey {
+       fn pubkey() -> bitcoin::util::key::PublicKey {
                let secp_ctx = Secp256k1::signing_only();
                let secret_key = SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]).unwrap();
-               bitcoin::util::ecdsa::PublicKey::new(PublicKey::from_secret_key(&secp_ctx, &secret_key))
+               bitcoin::util::key::PublicKey::new(PublicKey::from_secret_key(&secp_ctx, &secret_key))
        }
 
        fn redeem_script() -> Script {
@@ -204,9 +203,9 @@ mod shutdown_script_tests {
        fn generates_p2wpkh_from_pubkey() {
                let pubkey = pubkey();
                let pubkey_hash = pubkey.wpubkey_hash().unwrap();
-               let p2wpkh_script = Script::new_v0_wpkh(&pubkey_hash);
+               let p2wpkh_script = Script::new_v0_p2wpkh(&pubkey_hash);
 
-               let shutdown_script = ShutdownScript::new_p2wpkh_from_pubkey(pubkey.key);
+               let shutdown_script = ShutdownScript::new_p2wpkh_from_pubkey(pubkey.inner);
                assert!(shutdown_script.is_compatible(&InitFeatures::known()));
                assert!(shutdown_script.is_compatible(&InitFeatures::known().clear_shutdown_anysegwit()));
                assert_eq!(shutdown_script.into_inner(), p2wpkh_script);
@@ -215,7 +214,7 @@ mod shutdown_script_tests {
        #[test]
        fn generates_p2wpkh_from_pubkey_hash() {
                let pubkey_hash = pubkey().wpubkey_hash().unwrap();
-               let p2wpkh_script = Script::new_v0_wpkh(&pubkey_hash);
+               let p2wpkh_script = Script::new_v0_p2wpkh(&pubkey_hash);
 
                let shutdown_script = ShutdownScript::new_p2wpkh(&pubkey_hash);
                assert!(shutdown_script.is_compatible(&InitFeatures::known()));
@@ -227,7 +226,7 @@ mod shutdown_script_tests {
        #[test]
        fn generates_p2wsh_from_script_hash() {
                let script_hash = redeem_script().wscript_hash();
-               let p2wsh_script = Script::new_v0_wsh(&script_hash);
+               let p2wsh_script = Script::new_v0_p2wsh(&script_hash);
 
                let shutdown_script = ShutdownScript::new_p2wsh(&script_hash);
                assert!(shutdown_script.is_compatible(&InitFeatures::known()));
@@ -238,11 +237,8 @@ mod shutdown_script_tests {
 
        #[test]
        fn generates_segwit_from_non_v0_witness_program() {
-               let version = u5::try_from_u8(16).unwrap();
-               let witness_program = Script::new_witness_program(version, &[0; 40]);
-
-               let version = NonZeroU8::new(version.to_u8()).unwrap();
-               let shutdown_script = ShutdownScript::new_witness_program(version, &[0; 40]).unwrap();
+               let witness_program = Script::new_witness_program(WitnessVersion::V16, &[0; 40]);
+               let shutdown_script = ShutdownScript::new_witness_program(WitnessVersion::V16, &[0; 40]).unwrap();
                assert!(shutdown_script.is_compatible(&InitFeatures::known()));
                assert!(!shutdown_script.is_compatible(&InitFeatures::known().clear_shutdown_anysegwit()));
                assert_eq!(shutdown_script.into_inner(), witness_program);
@@ -254,25 +250,17 @@ mod shutdown_script_tests {
                assert!(ShutdownScript::try_from(op_return).is_err());
        }
 
-       #[test]
-       fn fails_from_invalid_segwit_version() {
-               let version = NonZeroU8::new(17).unwrap();
-               assert!(ShutdownScript::new_witness_program(version, &[0; 40]).is_err());
-       }
-
        #[test]
        fn fails_from_invalid_segwit_v0_witness_program() {
-               let witness_program = Script::new_witness_program(u5::try_from_u8(0).unwrap(), &[0; 2]);
+               let witness_program = Script::new_witness_program(WitnessVersion::V0, &[0; 2]);
                assert!(ShutdownScript::try_from(witness_program).is_err());
        }
 
        #[test]
        fn fails_from_invalid_segwit_non_v0_witness_program() {
-               let version = u5::try_from_u8(16).unwrap();
-               let witness_program = Script::new_witness_program(version, &[0; 42]);
+               let witness_program = Script::new_witness_program(WitnessVersion::V16, &[0; 42]);
                assert!(ShutdownScript::try_from(witness_program).is_err());
 
-               let version = NonZeroU8::new(version.to_u8()).unwrap();
-               assert!(ShutdownScript::new_witness_program(version, &[0; 42]).is_err());
+               assert!(ShutdownScript::new_witness_program(WitnessVersion::V16, &[0; 42]).is_err());
        }
 }
index 5e50ea77cc2cfeb87abc286b46c1f9158cac1026..6543f6b6e90e32fd5b92d418853b2576ca241f28 100644 (file)
@@ -26,11 +26,11 @@ use util::config::UserConfig;
 use bitcoin::blockdata::script::Builder;
 use bitcoin::blockdata::opcodes;
 use bitcoin::network::constants::Network;
+use bitcoin::util::address::WitnessVersion;
 
 use regex;
 
 use core::default::Default;
-use core::num::NonZeroU8;
 
 use ln::functional_test_utils::*;
 use ln::msgs::OptionalField::Present;
@@ -654,7 +654,7 @@ fn test_unsupported_anysegwit_shutdown_script() {
        // Check that using an unsupported shutdown script fails and a supported one succeeds.
        let supported_shutdown_script = chanmon_cfgs[1].keys_manager.get_shutdown_scriptpubkey();
        let unsupported_shutdown_script =
-               ShutdownScript::new_witness_program(NonZeroU8::new(16).unwrap(), &[0, 40]).unwrap();
+               ShutdownScript::new_witness_program(WitnessVersion::V16, &[0, 40]).unwrap();
        chanmon_cfgs[1].keys_manager
                .expect(OnGetShutdownScriptpubkey { returns: unsupported_shutdown_script.clone() })
                .expect(OnGetShutdownScriptpubkey { returns: supported_shutdown_script });
index e7db446a86f17f036ceeccba9aca52c4aa7ea652..8fd5c16f36261ce3782d72bb81846b3e5b503376 100644 (file)
@@ -28,11 +28,26 @@ pub trait CustomMessageReader {
        fn read<R: io::Read>(&self, message_type: u16, buffer: &mut R) -> Result<Option<Self::CustomMessage>, msgs::DecodeError>;
 }
 
+// TestEq is a dummy trait which requires PartialEq when built in testing, and otherwise is
+// blanket-implemented for all types.
+
+#[cfg(test)]
+pub trait TestEq : PartialEq {}
+#[cfg(test)]
+impl<T: PartialEq> TestEq for T {}
+
+#[cfg(not(test))]
+pub(crate) trait TestEq {}
+#[cfg(not(test))]
+impl<T> TestEq for T {}
+
+
 /// A Lightning message returned by [`read()`] when decoding bytes received over the wire. Each
 /// variant contains a message from [`msgs`] or otherwise the message type if unknown.
 #[allow(missing_docs)]
 #[derive(Debug)]
-pub(crate) enum Message<T> where T: core::fmt::Debug + Type {
+#[cfg_attr(test, derive(PartialEq))]
+pub(crate) enum Message<T> where T: core::fmt::Debug + Type + TestEq {
        Init(msgs::Init),
        Error(msgs::ErrorMessage),
        Warning(msgs::WarningMessage),
@@ -69,7 +84,7 @@ pub(crate) enum Message<T> where T: core::fmt::Debug + Type {
        Custom(T),
 }
 
-impl<T> Message<T> where T: core::fmt::Debug + Type {
+impl<T> Message<T> where T: core::fmt::Debug + Type + TestEq {
        /// Returns the type that was used to decode the message payload.
        pub fn type_id(&self) -> u16 {
                match self {
@@ -252,6 +267,7 @@ mod encode {
 
 pub(crate) use self::encode::Encode;
 
+#[cfg(not(test))]
 /// Defines a type identifier for sending messages over the wire.
 ///
 /// Messages implementing this trait specify a type and must be [`Writeable`].
@@ -260,10 +276,24 @@ pub trait Type: core::fmt::Debug + Writeable {
        fn type_id(&self) -> u16;
 }
 
+#[cfg(test)]
+pub trait Type: core::fmt::Debug + Writeable + PartialEq {
+       fn type_id(&self) -> u16;
+}
+
+#[cfg(any(feature = "_test_utils", fuzzing, test))]
+impl Type for () {
+       fn type_id(&self) -> u16 { unreachable!(); }
+}
+
+#[cfg(test)]
+impl<T: core::fmt::Debug + Writeable + PartialEq> Type for T where T: Encode {
+       fn type_id(&self) -> u16 { T::TYPE }
+}
+
+#[cfg(not(test))]
 impl<T: core::fmt::Debug + Writeable> Type for T where T: Encode {
-       fn type_id(&self) -> u16 {
-               T::TYPE
-       }
+       fn type_id(&self) -> u16 { T::TYPE }
 }
 
 impl Encode for msgs::Init {
@@ -471,10 +501,6 @@ mod tests {
                }
        }
 
-       impl Type for () {
-               fn type_id(&self) -> u16 { unreachable!(); }
-       }
-
        #[test]
        fn is_even_message_type() {
                let message = Message::<()>::Unknown(42);
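The `TestEq` bound introduced above follows a common cfg-gated "dummy trait" pattern; a standalone sketch (with a hypothetical `Wrapper` enum, not the real `Message`) looks like this:

    // In test builds the bound implies PartialEq, so containers of T can derive it;
    // in release builds it is a no-op bound satisfied by every type.
    #[cfg(test)]
    trait TestEq: PartialEq {}
    #[cfg(test)]
    impl<T: PartialEq> TestEq for T {}

    #[cfg(not(test))]
    trait TestEq {}
    #[cfg(not(test))]
    impl<T> TestEq for T {}

    #[cfg_attr(test, derive(PartialEq))]
    #[derive(Debug)]
    enum Wrapper<T: core::fmt::Debug + TestEq> {
        Known(u16),
        Custom(T),
    }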
index 39c2894b32c429b5115f4eee66c427bed21989b1..2e0679eba79f6cbbb1473ebdf89a0e113cf8fd5e 100644 (file)
@@ -10,7 +10,7 @@
 //! The top-level network map tracking logic lives here.
 
 use bitcoin::secp256k1::constants::PUBLIC_KEY_SIZE;
-use bitcoin::secp256k1::key::PublicKey;
+use bitcoin::secp256k1::PublicKey;
 use bitcoin::secp256k1::Secp256k1;
 use bitcoin::secp256k1;
 
@@ -295,7 +295,7 @@ where C::Target: chain::Access, L::Target: Logger
 
 macro_rules! secp_verify_sig {
        ( $secp_ctx: expr, $msg: expr, $sig: expr, $pubkey: expr, $msg_type: expr ) => {
-               match $secp_ctx.verify($msg, $sig, $pubkey) {
+               match $secp_ctx.verify_ecdsa($msg, $sig, $pubkey) {
                        Ok(_) => {},
                        Err(_) => {
                                return Err(LightningError {
@@ -1356,10 +1356,10 @@ impl NetworkGraph {
        /// If built with `no-std`, any updates with a timestamp more than two weeks in the past or
        /// materially in the future will be rejected.
        pub fn update_channel_unsigned(&self, msg: &msgs::UnsignedChannelUpdate) -> Result<(), LightningError> {
-               self.update_channel_intern(msg, None, None::<(&secp256k1::Signature, &Secp256k1<secp256k1::VerifyOnly>)>)
+               self.update_channel_intern(msg, None, None::<(&secp256k1::ecdsa::Signature, &Secp256k1<secp256k1::VerifyOnly>)>)
        }
 
-       fn update_channel_intern<T: secp256k1::Verification>(&self, msg: &msgs::UnsignedChannelUpdate, full_msg: Option<&msgs::ChannelUpdate>, sig_info: Option<(&secp256k1::Signature, &Secp256k1<T>)>) -> Result<(), LightningError> {
+       fn update_channel_intern<T: secp256k1::Verification>(&self, msg: &msgs::UnsignedChannelUpdate, full_msg: Option<&msgs::ChannelUpdate>, sig_info: Option<(&secp256k1::ecdsa::Signature, &Secp256k1<T>)>) -> Result<(), LightningError> {
                let dest_node_id;
                let chan_enabled = msg.flags & (1 << 1) != (1 << 1);
                let chan_was_enabled;
@@ -1578,10 +1578,11 @@ mod tests {
 
        use hex;
 
-       use bitcoin::secp256k1::key::{PublicKey, SecretKey};
+       use bitcoin::secp256k1::{PublicKey, SecretKey};
        use bitcoin::secp256k1::{All, Secp256k1};
 
        use io;
+       use bitcoin::secp256k1;
        use prelude::*;
        use sync::Arc;
 
@@ -1628,7 +1629,7 @@ mod tests {
                f(&mut unsigned_announcement);
                let msghash = hash_to_message!(&Sha256dHash::hash(&unsigned_announcement.encode()[..])[..]);
                NodeAnnouncement {
-                       signature: secp_ctx.sign(&msghash, node_key),
+                       signature: secp_ctx.sign_ecdsa(&msghash, node_key),
                        contents: unsigned_announcement
                }
        }
@@ -1652,10 +1653,10 @@ mod tests {
                f(&mut unsigned_announcement);
                let msghash = hash_to_message!(&Sha256dHash::hash(&unsigned_announcement.encode()[..])[..]);
                ChannelAnnouncement {
-                       node_signature_1: secp_ctx.sign(&msghash, node_1_key),
-                       node_signature_2: secp_ctx.sign(&msghash, node_2_key),
-                       bitcoin_signature_1: secp_ctx.sign(&msghash, node_1_btckey),
-                       bitcoin_signature_2: secp_ctx.sign(&msghash, node_2_btckey),
+                       node_signature_1: secp_ctx.sign_ecdsa(&msghash, node_1_key),
+                       node_signature_2: secp_ctx.sign_ecdsa(&msghash, node_2_key),
+                       bitcoin_signature_1: secp_ctx.sign_ecdsa(&msghash, node_1_btckey),
+                       bitcoin_signature_2: secp_ctx.sign_ecdsa(&msghash, node_2_btckey),
                        contents: unsigned_announcement,
                }
        }
@@ -1687,7 +1688,7 @@ mod tests {
                f(&mut unsigned_channel_update);
                let msghash = hash_to_message!(&Sha256dHash::hash(&unsigned_channel_update.encode()[..])[..]);
                ChannelUpdate {
-                       signature: secp_ctx.sign(&msghash, node_key),
+                       signature: secp_ctx.sign_ecdsa(&msghash, node_key),
                        contents: unsigned_channel_update
                }
        }
@@ -1724,7 +1725,7 @@ mod tests {
                let fake_msghash = hash_to_message!(&zero_hash);
                match net_graph_msg_handler.handle_node_announcement(
                        &NodeAnnouncement {
-                               signature: secp_ctx.sign(&fake_msghash, node_1_privkey),
+                               signature: secp_ctx.sign_ecdsa(&fake_msghash, node_1_privkey),
                                contents: valid_announcement.contents.clone()
                }) {
                        Ok(_) => panic!(),
@@ -1963,7 +1964,7 @@ mod tests {
                }, node_1_privkey, &secp_ctx);
                let zero_hash = Sha256dHash::hash(&[0; 32]);
                let fake_msghash = hash_to_message!(&zero_hash);
-               invalid_sig_channel_update.signature = secp_ctx.sign(&fake_msghash, node_1_privkey);
+               invalid_sig_channel_update.signature = secp_ctx.sign_ecdsa(&fake_msghash, node_1_privkey);
                match net_graph_msg_handler.handle_channel_update(&invalid_sig_channel_update) {
                        Ok(_) => panic!(),
                        Err(e) => assert_eq!(e.err, "Invalid signature on channel_update message")
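The renames in this file track the secp256k1 upgrade: key types move out of the `key` submodule and the ECDSA methods gain an `_ecdsa` suffix. A minimal sketch (placeholder key and message bytes) of the new spelling:

    use bitcoin::secp256k1::{Message, PublicKey, Secp256k1, SecretKey};

    let secp_ctx = Secp256k1::new();
    let seckey = SecretKey::from_slice(&[42u8; 32]).unwrap();
    let pubkey = PublicKey::from_secret_key(&secp_ctx, &seckey);
    let msg = Message::from_slice(&[3u8; 32]).unwrap();

    let sig = secp_ctx.sign_ecdsa(&msg, &seckey);                 // previously secp_ctx.sign(..)
    assert!(secp_ctx.verify_ecdsa(&msg, &sig, &pubkey).is_ok());  // previously secp_ctx.verify(..)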
index 212ac2f10cae7d62695f032c6fd8ab0a4e523ffa..dc094b9b6e0309a8fc51cd01a75a1ad480c3d432 100644 (file)
@@ -12,7 +12,7 @@
 //! You probably want to create a NetGraphMsgHandler and use that as your RoutingMessageHandler and then
 //! interrogate it to get routes for your own payments.
 
-use bitcoin::secp256k1::key::PublicKey;
+use bitcoin::secp256k1::PublicKey;
 
 use ln::channelmanager::ChannelDetails;
 use ln::features::{ChannelFeatures, InvoiceFeatures, NodeFeatures};
@@ -412,7 +412,7 @@ impl<'a> CandidateRouteHop<'a> {
        fn effective_capacity(&self) -> EffectiveCapacity {
                match self {
                        CandidateRouteHop::FirstHop { details } => EffectiveCapacity::ExactLiquidity {
-                               liquidity_msat: details.outbound_capacity_msat,
+                               liquidity_msat: details.next_outbound_htlc_limit_msat,
                        },
                        CandidateRouteHop::PublicHop { info, .. } => info.effective_capacity(),
                        CandidateRouteHop::PrivateHop { .. } => EffectiveCapacity::Infinite,
@@ -818,7 +818,8 @@ where L::Target: Logger {
        // We don't want multiple paths (as per MPP) to share liquidity of the same channels.
        // This map allows paths to be aware of the liquidity used by other paths in the same call.
        // This helps to make better path-finding decisions and not "overbook" channels.
-       // It is unaware of the directions (except for `outbound_capacity_msat` in `first_hops`).
+       // It is unaware of the directions (except for `next_outbound_htlc_limit_msat` in
+       // `first_hops`).
        let mut bookkept_channels_liquidity_available_msat = HashMap::with_capacity(network_nodes.len());
 
        // Keeping track of how much value we already collected across other paths. Helps to decide:
@@ -841,12 +842,12 @@ where L::Target: Logger {
                // sort channels above `recommended_value_msat` in ascending order, preferring channels
                // which have enough, but not too much, capacity for the payment.
                channels.sort_unstable_by(|chan_a, chan_b| {
-                       if chan_b.outbound_capacity_msat < recommended_value_msat || chan_a.outbound_capacity_msat < recommended_value_msat {
+                       if chan_b.next_outbound_htlc_limit_msat < recommended_value_msat || chan_a.next_outbound_htlc_limit_msat < recommended_value_msat {
                                // Sort in descending order
-                               chan_b.outbound_capacity_msat.cmp(&chan_a.outbound_capacity_msat)
+                               chan_b.next_outbound_htlc_limit_msat.cmp(&chan_a.next_outbound_htlc_limit_msat)
                        } else {
                                // Sort in ascending order
-                               chan_a.outbound_capacity_msat.cmp(&chan_b.outbound_capacity_msat)
+                               chan_a.next_outbound_htlc_limit_msat.cmp(&chan_b.next_outbound_htlc_limit_msat)
                        }
                });
        }
@@ -1720,7 +1721,7 @@ mod tests {
 
        use hex;
 
-       use bitcoin::secp256k1::key::{PublicKey,SecretKey};
+       use bitcoin::secp256k1::{PublicKey,SecretKey};
        use bitcoin::secp256k1::{Secp256k1, All};
 
        use prelude::*;
@@ -1746,6 +1747,7 @@ mod tests {
                        user_channel_id: 0,
                        balance_msat: 0,
                        outbound_capacity_msat,
+                       next_outbound_htlc_limit_msat: outbound_capacity_msat,
                        inbound_capacity_msat: 42,
                        unspendable_punishment_reserve: None,
                        confirmations_required: None,
@@ -1778,10 +1780,10 @@ mod tests {
 
                let msghash = hash_to_message!(&Sha256dHash::hash(&unsigned_announcement.encode()[..])[..]);
                let valid_announcement = ChannelAnnouncement {
-                       node_signature_1: secp_ctx.sign(&msghash, node_1_privkey),
-                       node_signature_2: secp_ctx.sign(&msghash, node_2_privkey),
-                       bitcoin_signature_1: secp_ctx.sign(&msghash, node_1_privkey),
-                       bitcoin_signature_2: secp_ctx.sign(&msghash, node_2_privkey),
+                       node_signature_1: secp_ctx.sign_ecdsa(&msghash, node_1_privkey),
+                       node_signature_2: secp_ctx.sign_ecdsa(&msghash, node_2_privkey),
+                       bitcoin_signature_1: secp_ctx.sign_ecdsa(&msghash, node_1_privkey),
+                       bitcoin_signature_2: secp_ctx.sign_ecdsa(&msghash, node_2_privkey),
                        contents: unsigned_announcement.clone(),
                };
                match net_graph_msg_handler.handle_channel_announcement(&valid_announcement) {
@@ -1796,7 +1798,7 @@ mod tests {
        ) {
                let msghash = hash_to_message!(&Sha256dHash::hash(&update.encode()[..])[..]);
                let valid_channel_update = ChannelUpdate {
-                       signature: secp_ctx.sign(&msghash, node_privkey),
+                       signature: secp_ctx.sign_ecdsa(&msghash, node_privkey),
                        contents: update.clone()
                };
 
@@ -1823,7 +1825,7 @@ mod tests {
                };
                let msghash = hash_to_message!(&Sha256dHash::hash(&unsigned_announcement.encode()[..])[..]);
                let valid_announcement = NodeAnnouncement {
-                       signature: secp_ctx.sign(&msghash, node_privkey),
+                       signature: secp_ctx.sign_ecdsa(&msghash, node_privkey),
                        contents: unsigned_announcement.clone()
                };
 
@@ -3407,7 +3409,7 @@ mod tests {
                        assert_eq!(path.last().unwrap().fee_msat, 250_000_000);
                }
 
-               // Check that setting outbound_capacity_msat in first_hops limits the channels.
+               // Check that setting next_outbound_htlc_limit_msat in first_hops limits the channels.
                // Disable channel #1 and use another first hop.
                update_channel(&net_graph_msg_handler, &secp_ctx, &our_privkey, UnsignedChannelUpdate {
                        chain_hash: genesis_block(Network::Testnet).header.block_hash(),
@@ -3422,7 +3424,7 @@ mod tests {
                        excess_data: Vec::new()
                });
 
-               // Now, limit the first_hop by the outbound_capacity_msat of 200_000 sats.
+               // Now, limit the first_hop by the next_outbound_htlc_limit_msat of 200_000 sats.
                let our_chans = vec![get_channel_details(Some(42), nodes[0].clone(), InitFeatures::from_le_bytes(vec![0b11]), 200_000_000)];
 
                {
@@ -5350,8 +5352,9 @@ mod tests {
                                let payment_params = PaymentParameters::from_node_id(dst);
                                let amt = seed as u64 % 200_000_000;
                                let params = ProbabilisticScoringParameters::default();
-                               let scorer = ProbabilisticScorer::new(params, &graph);
-                               if get_route(src, &payment_params, &graph.read_only(), None, amt, 42, &test_utils::TestLogger::new(), &scorer, &random_seed_bytes).is_ok() {
+                               let logger = test_utils::TestLogger::new();
+                               let scorer = ProbabilisticScorer::new(params, &graph, &logger);
+                               if get_route(src, &payment_params, &graph.read_only(), None, amt, 42, &logger, &scorer, &random_seed_bytes).is_ok() {
                                        continue 'load_endpoints;
                                }
                        }
@@ -5386,8 +5389,9 @@ mod tests {
                                let payment_params = PaymentParameters::from_node_id(dst).with_features(InvoiceFeatures::known());
                                let amt = seed as u64 % 200_000_000;
                                let params = ProbabilisticScoringParameters::default();
-                               let scorer = ProbabilisticScorer::new(params, &graph);
-                               if get_route(src, &payment_params, &graph.read_only(), None, amt, 42, &test_utils::TestLogger::new(), &scorer, &random_seed_bytes).is_ok() {
+                               let logger = test_utils::TestLogger::new();
+                               let scorer = ProbabilisticScorer::new(params, &graph, &logger);
+                               if get_route(src, &payment_params, &graph.read_only(), None, amt, 42, &logger, &scorer, &random_seed_bytes).is_ok() {
                                        continue 'load_endpoints;
                                }
                        }
@@ -5433,6 +5437,7 @@ mod benches {
        use ln::features::{InitFeatures, InvoiceFeatures};
        use routing::scoring::{FixedPenaltyScorer, ProbabilisticScorer, ProbabilisticScoringParameters, Scorer};
        use util::logger::{Logger, Record};
+       use util::test_utils::TestLogger;
 
        use test::Bencher;
 
@@ -5473,6 +5478,7 @@ mod benches {
                        user_channel_id: 0,
                        balance_msat: 10_000_000,
                        outbound_capacity_msat: 10_000_000,
+                       next_outbound_htlc_limit_msat: 10_000_000,
                        inbound_capacity_msat: 0,
                        unspendable_punishment_reserve: None,
                        confirmations_required: None,
@@ -5516,17 +5522,19 @@ mod benches {
 
        #[bench]
        fn generate_routes_with_probabilistic_scorer(bench: &mut Bencher) {
+               let logger = TestLogger::new();
                let network_graph = read_network_graph();
                let params = ProbabilisticScoringParameters::default();
-               let scorer = ProbabilisticScorer::new(params, &network_graph);
+               let scorer = ProbabilisticScorer::new(params, &network_graph, &logger);
                generate_routes(bench, &network_graph, scorer, InvoiceFeatures::empty());
        }
 
        #[bench]
        fn generate_mpp_routes_with_probabilistic_scorer(bench: &mut Bencher) {
+               let logger = TestLogger::new();
                let network_graph = read_network_graph();
                let params = ProbabilisticScoringParameters::default();
-               let scorer = ProbabilisticScorer::new(params, &network_graph);
+               let scorer = ProbabilisticScorer::new(params, &network_graph, &logger);
                generate_routes(bench, &network_graph, scorer, InvoiceFeatures::known());
        }
 
index 206cc0a83a5b0635acf3a98b48ba04fd4e0e4893..4c47aac47b64c2e78df2b39d0e740083023ecadc 100644 (file)
 //! # Example
 //!
 //! ```
-//! # extern crate secp256k1;
+//! # extern crate bitcoin;
 //! #
 //! # use lightning::routing::network_graph::NetworkGraph;
 //! # use lightning::routing::router::{RouteParameters, find_route};
 //! # use lightning::routing::scoring::{ProbabilisticScorer, ProbabilisticScoringParameters, Scorer, ScoringParameters};
 //! # use lightning::chain::keysinterface::{KeysManager, KeysInterface};
 //! # use lightning::util::logger::{Logger, Record};
-//! # use secp256k1::key::PublicKey;
+//! # use bitcoin::secp256k1::PublicKey;
 //! #
 //! # struct FakeLogger {};
 //! # impl Logger for FakeLogger {
 //! #
 //! // Use the default channel penalties.
 //! let params = ProbabilisticScoringParameters::default();
-//! let scorer = ProbabilisticScorer::new(params, &network_graph);
+//! let scorer = ProbabilisticScorer::new(params, &network_graph, &logger);
 //!
 //! // Or use custom channel penalties.
 //! let params = ProbabilisticScoringParameters {
 //!     liquidity_penalty_multiplier_msat: 2 * 1000,
 //!     ..ProbabilisticScoringParameters::default()
 //! };
-//! let scorer = ProbabilisticScorer::new(params, &network_graph);
+//! let scorer = ProbabilisticScorer::new(params, &network_graph, &logger);
 //! # let random_seed_bytes = [42u8; 32];
 //!
 //! let route = find_route(&payer, &route_params, &network_graph, None, &logger, &scorer, &random_seed_bytes);
@@ -58,8 +58,10 @@ use ln::msgs::DecodeError;
 use routing::network_graph::{NetworkGraph, NodeId};
 use routing::router::RouteHop;
 use util::ser::{Readable, ReadableArgs, Writeable, Writer};
+use util::logger::Logger;
 
 use prelude::*;
+use core::fmt;
 use core::cell::{RefCell, RefMut};
 use core::ops::{Deref, DerefMut};
 use core::time::Duration;
@@ -135,6 +137,14 @@ pub trait LockableScore<'a> {
        fn lock(&'a self) -> Self::Locked;
 }
 
+/// Refers to a scorer that is accessible under lock and also writeable to disk.
+///
+/// We need this trait so that a scorer can be passed to `lightning-background-processor`, which can then
+/// use the `Persister` to persist it.
+pub trait WriteableScore<'a>: LockableScore<'a> + Writeable {}
+
+impl<'a, T> WriteableScore<'a> for T where T: LockableScore<'a> + Writeable {}
+
 /// (C-not exported)
 impl<'a, T: 'a + Score> LockableScore<'a> for Mutex<T> {
        type Locked = MutexGuard<'a, T>;
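As a sketch of how the new `WriteableScore` bound can be consumed (the `persist_scorer` helper below is hypothetical, not part of this change; `Writeable` and `Writer` are the `util::ser` items already imported in this file):

    // Any scorer behind a lock that also knows how to serialize itself, e.g. a
    // Mutex-wrapped ProbabilisticScorer, satisfies WriteableScore via the blanket impl.
    fn persist_scorer<'a, S: WriteableScore<'a>, W: Writer>(scorer: &S, w: &mut W) -> bool {
        scorer.write(w).is_ok() // Writeable is a supertrait, so the scorer writes itself
    }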
@@ -503,14 +513,15 @@ impl<T: Time> Readable for ChannelFailure<T> {
 /// behavior.
 ///
 /// [1]: https://arxiv.org/abs/2107.05322
-pub type ProbabilisticScorer<G> = ProbabilisticScorerUsingTime::<G, ConfiguredTime>;
+pub type ProbabilisticScorer<G, L> = ProbabilisticScorerUsingTime::<G, L, ConfiguredTime>;
 
 /// Probabilistic [`Score`] implementation.
 ///
 /// (C-not exported) generally all users should use the [`ProbabilisticScorer`] type alias.
-pub struct ProbabilisticScorerUsingTime<G: Deref<Target = NetworkGraph>, T: Time> {
+pub struct ProbabilisticScorerUsingTime<G: Deref<Target = NetworkGraph>, L: Deref, T: Time> where L::Target: Logger {
        params: ProbabilisticScoringParameters,
        network_graph: G,
+       logger: L,
        // TODO: Remove entries of closed channels.
        channel_liquidities: HashMap<u64, ChannelLiquidity<T>>,
 }
@@ -603,13 +614,14 @@ struct DirectedChannelLiquidity<L: Deref<Target = u64>, T: Time, U: Deref<Target
        half_life: Duration,
 }
 
-impl<G: Deref<Target = NetworkGraph>, T: Time> ProbabilisticScorerUsingTime<G, T> {
+impl<G: Deref<Target = NetworkGraph>, L: Deref, T: Time> ProbabilisticScorerUsingTime<G, L, T> where L::Target: Logger {
        /// Creates a new scorer using the given scoring parameters for sending payments from a node
        /// through a network graph.
-       pub fn new(params: ProbabilisticScoringParameters, network_graph: G) -> Self {
+       pub fn new(params: ProbabilisticScoringParameters, network_graph: G, logger: L) -> Self {
                Self {
                        params,
                        network_graph,
+                       logger,
                        channel_liquidities: HashMap::new(),
                }
        }
@@ -619,6 +631,33 @@ impl<G: Deref<Target = NetworkGraph>, T: Time> ProbabilisticScorerUsingTime<G, T
                assert!(self.channel_liquidities.insert(short_channel_id, liquidity).is_none());
                self
        }
+
+       /// Dump the contents of this scorer into the configured logger.
+       ///
+       /// Note that this writes roughly one line per channel for which we have a liquidity estimate,
+       /// which may be a substantial amount of log output.
+       pub fn debug_log_liquidity_stats(&self) {
+               let graph = self.network_graph.read_only();
+               for (scid, liq) in self.channel_liquidities.iter() {
+                       if let Some(chan_debug) = graph.channels().get(scid) {
+                               let log_direction = |source, target| {
+                                       if let Some((directed_info, _)) = chan_debug.as_directed_to(target) {
+                                               let amt = directed_info.effective_capacity().as_msat();
+                                               let dir_liq = liq.as_directed(source, target, amt, self.params.liquidity_offset_half_life);
+                                               log_debug!(self.logger, "Liquidity from {:?} to {:?} via {} is in the range ({}, {})",
+                                                       source, target, scid, dir_liq.min_liquidity_msat(), dir_liq.max_liquidity_msat());
+                                       } else {
+                                               log_debug!(self.logger, "No amount known for SCID {} from {:?} to {:?}", scid, source, target);
+                                       }
+                               };
+
+                               log_direction(&chan_debug.node_one, &chan_debug.node_two);
+                               log_direction(&chan_debug.node_two, &chan_debug.node_one);
+                       } else {
+                               log_debug!(self.logger, "No network graph entry for SCID {}", scid);
+                       }
+               }
+       }
 }
 
 impl ProbabilisticScoringParameters {
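A short usage sketch of the logger-aware constructor and the new debug helper above; `graph` and `logger` stand in for whatever `NetworkGraph` and `Logger` the application already holds:

    // The scorer now takes a Logger as a third argument; the helper then dumps
    // roughly one log line per channel for which a liquidity estimate exists.
    let params = ProbabilisticScoringParameters::default();
    let scorer = ProbabilisticScorer::new(params, &graph, &logger);
    scorer.debug_log_liquidity_stats();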
@@ -787,22 +826,29 @@ impl<L: Deref<Target = u64>, T: Time, U: Deref<Target = T>> DirectedChannelLiqui
 
 impl<L: DerefMut<Target = u64>, T: Time, U: DerefMut<Target = T>> DirectedChannelLiquidity<L, T, U> {
        /// Adjusts the channel liquidity balance bounds when failing to route `amount_msat`.
-       fn failed_at_channel(&mut self, amount_msat: u64) {
+       fn failed_at_channel<Log: Deref>(&mut self, amount_msat: u64, chan_descr: fmt::Arguments, logger: &Log) where Log::Target: Logger {
                if amount_msat < self.max_liquidity_msat() {
+                       log_debug!(logger, "Setting max liquidity of {} to {}", chan_descr, amount_msat);
                        self.set_max_liquidity_msat(amount_msat);
+               } else {
+                       log_trace!(logger, "Max liquidity of {} already more than {}", chan_descr, amount_msat);
                }
        }
 
        /// Adjusts the channel liquidity balance bounds when failing to route `amount_msat` downstream.
-       fn failed_downstream(&mut self, amount_msat: u64) {
+       fn failed_downstream<Log: Deref>(&mut self, amount_msat: u64, chan_descr: fmt::Arguments, logger: &Log) where Log::Target: Logger {
                if amount_msat > self.min_liquidity_msat() {
+                       log_debug!(logger, "Setting min liquidity of {} to {}", chan_descr, amount_msat);
                        self.set_min_liquidity_msat(amount_msat);
+               } else {
+                       log_trace!(logger, "Min liquidity of {} already less than {}", chan_descr, amount_msat);
                }
        }
 
        /// Adjusts the channel liquidity balance bounds when successfully routing `amount_msat`.
-       fn successful(&mut self, amount_msat: u64) {
+       fn successful<Log: Deref>(&mut self, amount_msat: u64, chan_descr: fmt::Arguments, logger: &Log) where Log::Target: Logger {
                let max_liquidity_msat = self.max_liquidity_msat().checked_sub(amount_msat).unwrap_or(0);
+               log_debug!(logger, "Subtracting {} from max liquidity of {} (setting it to {})", amount_msat, chan_descr, max_liquidity_msat);
                self.set_max_liquidity_msat(max_liquidity_msat);
        }
 
@@ -829,7 +875,7 @@ impl<L: DerefMut<Target = u64>, T: Time, U: DerefMut<Target = T>> DirectedChanne
        }
 }
 
-impl<G: Deref<Target = NetworkGraph>, T: Time> Score for ProbabilisticScorerUsingTime<G, T> {
+impl<G: Deref<Target = NetworkGraph>, L: Deref, T: Time> Score for ProbabilisticScorerUsingTime<G, L, T> where L::Target: Logger {
        fn channel_penalty_msat(
                &self, short_channel_id: u64, amount_msat: u64, capacity_msat: u64, source: &NodeId,
                target: &NodeId
@@ -845,13 +891,18 @@ impl<G: Deref<Target = NetworkGraph>, T: Time> Score for ProbabilisticScorerUsin
        fn payment_path_failed(&mut self, path: &[&RouteHop], short_channel_id: u64) {
                let amount_msat = path.split_last().map(|(hop, _)| hop.fee_msat).unwrap_or(0);
                let liquidity_offset_half_life = self.params.liquidity_offset_half_life;
+               log_trace!(self.logger, "Scoring path through to SCID {} as having failed at {} msat", short_channel_id, amount_msat);
                let network_graph = self.network_graph.read_only();
-               for hop in path {
+               for (hop_idx, hop) in path.iter().enumerate() {
                        let target = NodeId::from_pubkey(&hop.pubkey);
                        let channel_directed_from_source = network_graph.channels()
                                .get(&hop.short_channel_id)
                                .and_then(|channel| channel.as_directed_to(&target));
 
+                       if hop.short_channel_id == short_channel_id && hop_idx == 0 {
+                               log_warn!(self.logger, "Payment failed at the first hop - we do not attempt to learn channel info in such cases as we can directly observe local state.\n\tBecause we know the local state, we should generally not see failures here - this may be an indication that your channel peer on channel {} is broken and you may wish to close the channel.", hop.short_channel_id);
+                       }
+
                        // Only score announced channels.
                        if let Some((channel, source)) = channel_directed_from_source {
                                let capacity_msat = channel.effective_capacity().as_msat();
@@ -860,7 +911,7 @@ impl<G: Deref<Target = NetworkGraph>, T: Time> Score for ProbabilisticScorerUsin
                                                .entry(hop.short_channel_id)
                                                .or_insert_with(ChannelLiquidity::new)
                                                .as_directed_mut(source, &target, capacity_msat, liquidity_offset_half_life)
-                                               .failed_at_channel(amount_msat);
+                                               .failed_at_channel(amount_msat, format_args!("SCID {}, towards {:?}", hop.short_channel_id, target), &self.logger);
                                        break;
                                }
 
@@ -868,7 +919,10 @@ impl<G: Deref<Target = NetworkGraph>, T: Time> Score for ProbabilisticScorerUsin
                                        .entry(hop.short_channel_id)
                                        .or_insert_with(ChannelLiquidity::new)
                                        .as_directed_mut(source, &target, capacity_msat, liquidity_offset_half_life)
-                                       .failed_downstream(amount_msat);
+                                       .failed_downstream(amount_msat, format_args!("SCID {}, towards {:?}", hop.short_channel_id, target), &self.logger);
+                       } else {
+                               log_debug!(self.logger, "Not able to penalize channel with SCID {} as we do not have graph info for it (likely a route-hint last-hop).",
+                                       hop.short_channel_id);
                        }
                }
        }
@@ -876,6 +930,8 @@ impl<G: Deref<Target = NetworkGraph>, T: Time> Score for ProbabilisticScorerUsin
        fn payment_path_successful(&mut self, path: &[&RouteHop]) {
                let amount_msat = path.split_last().map(|(hop, _)| hop.fee_msat).unwrap_or(0);
                let liquidity_offset_half_life = self.params.liquidity_offset_half_life;
+               log_trace!(self.logger, "Scoring path through SCID {} as having succeeded at {} msat.",
+                       path.split_last().map(|(hop, _)| hop.short_channel_id).unwrap_or(0), amount_msat);
                let network_graph = self.network_graph.read_only();
                for hop in path {
                        let target = NodeId::from_pubkey(&hop.pubkey);
@@ -890,7 +946,10 @@ impl<G: Deref<Target = NetworkGraph>, T: Time> Score for ProbabilisticScorerUsin
                                        .entry(hop.short_channel_id)
                                        .or_insert_with(ChannelLiquidity::new)
                                        .as_directed_mut(source, &target, capacity_msat, liquidity_offset_half_life)
-                                       .successful(amount_msat);
+                                       .successful(amount_msat, format_args!("SCID {}, towards {:?}", hop.short_channel_id, target), &self.logger);
+                       } else {
+                               log_debug!(self.logger, "Not able to learn for channel with SCID {} as we do not have graph info for it (likely a route-hint last-hop).",
+                                       hop.short_channel_id);
                        }
                }
        }
@@ -1206,7 +1265,7 @@ mod approx {
        }
 }
 
-impl<G: Deref<Target = NetworkGraph>, T: Time> Writeable for ProbabilisticScorerUsingTime<G, T> {
+impl<G: Deref<Target = NetworkGraph>, L: Deref, T: Time> Writeable for ProbabilisticScorerUsingTime<G, L, T> where L::Target: Logger {
        #[inline]
        fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
                write_tlv_fields!(w, {
@@ -1216,13 +1275,13 @@ impl<G: Deref<Target = NetworkGraph>, T: Time> Writeable for ProbabilisticScorer
        }
 }
 
-impl<G: Deref<Target = NetworkGraph>, T: Time>
-ReadableArgs<(ProbabilisticScoringParameters, G)> for ProbabilisticScorerUsingTime<G, T> {
+impl<G: Deref<Target = NetworkGraph>, L: Deref, T: Time>
+ReadableArgs<(ProbabilisticScoringParameters, G, L)> for ProbabilisticScorerUsingTime<G, L, T> where L::Target: Logger {
        #[inline]
        fn read<R: Read>(
-               r: &mut R, args: (ProbabilisticScoringParameters, G)
+               r: &mut R, args: (ProbabilisticScoringParameters, G, L)
        ) -> Result<Self, DecodeError> {
-               let (params, network_graph) = args;
+               let (params, network_graph, logger) = args;
                let mut channel_liquidities = HashMap::new();
                read_tlv_fields!(r, {
                        (0, channel_liquidities, required)
@@ -1230,6 +1289,7 @@ ReadableArgs<(ProbabilisticScoringParameters, G)> for ProbabilisticScorerUsingTi
                Ok(Self {
                        params,
                        network_graph,
+                       logger,
                        channel_liquidities,
                })
        }
@@ -1351,6 +1411,7 @@ mod tests {
        use routing::network_graph::{NetworkGraph, NodeId};
        use routing::router::RouteHop;
        use util::ser::{Readable, ReadableArgs, Writeable};
+       use util::test_utils::TestLogger;
 
        use bitcoin::blockdata::constants::genesis_block;
        use bitcoin::hashes::Hash;
@@ -1695,7 +1756,7 @@ mod tests {
        // `ProbabilisticScorer` tests
 
        /// A probabilistic scorer for testing with time that can be manually advanced.
-       type ProbabilisticScorer<'a> = ProbabilisticScorerUsingTime::<&'a NetworkGraph, SinceEpoch>;
+       type ProbabilisticScorer<'a> = ProbabilisticScorerUsingTime::<&'a NetworkGraph, &'a TestLogger, SinceEpoch>;
 
        fn sender_privkey() -> SecretKey {
                SecretKey::from_slice(&[41; 32]).unwrap()
@@ -1752,10 +1813,10 @@ mod tests {
                };
                let msghash = hash_to_message!(&Sha256dHash::hash(&unsigned_announcement.encode()[..])[..]);
                let signed_announcement = ChannelAnnouncement {
-                       node_signature_1: secp_ctx.sign(&msghash, &node_1_key),
-                       node_signature_2: secp_ctx.sign(&msghash, &node_2_key),
-                       bitcoin_signature_1: secp_ctx.sign(&msghash, &node_1_secret),
-                       bitcoin_signature_2: secp_ctx.sign(&msghash, &node_2_secret),
+                       node_signature_1: secp_ctx.sign_ecdsa(&msghash, &node_1_key),
+                       node_signature_2: secp_ctx.sign_ecdsa(&msghash, &node_2_key),
+                       bitcoin_signature_1: secp_ctx.sign_ecdsa(&msghash, &node_1_secret),
+                       bitcoin_signature_2: secp_ctx.sign_ecdsa(&msghash, &node_2_secret),
                        contents: unsigned_announcement,
                };
                let chain_source: Option<&::util::test_utils::TestChainSource> = None;
@@ -1784,7 +1845,7 @@ mod tests {
                };
                let msghash = hash_to_message!(&Sha256dHash::hash(&unsigned_update.encode()[..])[..]);
                let signed_update = ChannelUpdate {
-                       signature: secp_ctx.sign(&msghash, &node_key),
+                       signature: secp_ctx.sign_ecdsa(&msghash, &node_key),
                        contents: unsigned_update,
                };
                network_graph.update_channel(&signed_update, &secp_ctx).unwrap();
@@ -1821,10 +1882,11 @@ mod tests {
 
        #[test]
        fn liquidity_bounds_directed_from_lowest_node_id() {
+               let logger = TestLogger::new();
                let last_updated = SinceEpoch::now();
                let network_graph = network_graph();
                let params = ProbabilisticScoringParameters::default();
-               let mut scorer = ProbabilisticScorer::new(params, &network_graph)
+               let mut scorer = ProbabilisticScorer::new(params, &network_graph, &logger)
                        .with_channel(42,
                                ChannelLiquidity {
                                        min_liquidity_offset_msat: 700, max_liquidity_offset_msat: 100, last_updated
@@ -1895,10 +1957,11 @@ mod tests {
 
        #[test]
        fn resets_liquidity_upper_bound_when_crossed_by_lower_bound() {
+               let logger = TestLogger::new();
                let last_updated = SinceEpoch::now();
                let network_graph = network_graph();
                let params = ProbabilisticScoringParameters::default();
-               let mut scorer = ProbabilisticScorer::new(params, &network_graph)
+               let mut scorer = ProbabilisticScorer::new(params, &network_graph, &logger)
                        .with_channel(42,
                                ChannelLiquidity {
                                        min_liquidity_offset_msat: 200, max_liquidity_offset_msat: 400, last_updated
@@ -1952,10 +2015,11 @@ mod tests {
 
        #[test]
        fn resets_liquidity_lower_bound_when_crossed_by_upper_bound() {
+               let logger = TestLogger::new();
                let last_updated = SinceEpoch::now();
                let network_graph = network_graph();
                let params = ProbabilisticScoringParameters::default();
-               let mut scorer = ProbabilisticScorer::new(params, &network_graph)
+               let mut scorer = ProbabilisticScorer::new(params, &network_graph, &logger)
                        .with_channel(42,
                                ChannelLiquidity {
                                        min_liquidity_offset_msat: 200, max_liquidity_offset_msat: 400, last_updated
@@ -2009,12 +2073,13 @@ mod tests {
 
        #[test]
        fn increased_penalty_nearing_liquidity_upper_bound() {
+               let logger = TestLogger::new();
                let network_graph = network_graph();
                let params = ProbabilisticScoringParameters {
                        liquidity_penalty_multiplier_msat: 1_000,
                        ..ProbabilisticScoringParameters::zero_penalty()
                };
-               let scorer = ProbabilisticScorer::new(params, &network_graph);
+               let scorer = ProbabilisticScorer::new(params, &network_graph, &logger);
                let source = source_node_id();
                let target = target_node_id();
 
@@ -2034,13 +2099,14 @@ mod tests {
 
        #[test]
        fn constant_penalty_outside_liquidity_bounds() {
+               let logger = TestLogger::new();
                let last_updated = SinceEpoch::now();
                let network_graph = network_graph();
                let params = ProbabilisticScoringParameters {
                        liquidity_penalty_multiplier_msat: 1_000,
                        ..ProbabilisticScoringParameters::zero_penalty()
                };
-               let scorer = ProbabilisticScorer::new(params, &network_graph)
+               let scorer = ProbabilisticScorer::new(params, &network_graph, &logger)
                        .with_channel(42,
                                ChannelLiquidity {
                                        min_liquidity_offset_msat: 40, max_liquidity_offset_msat: 40, last_updated
@@ -2056,12 +2122,13 @@ mod tests {
 
        #[test]
        fn does_not_further_penalize_own_channel() {
+               let logger = TestLogger::new();
                let network_graph = network_graph();
                let params = ProbabilisticScoringParameters {
                        liquidity_penalty_multiplier_msat: 1_000,
                        ..ProbabilisticScoringParameters::zero_penalty()
                };
-               let mut scorer = ProbabilisticScorer::new(params, &network_graph);
+               let mut scorer = ProbabilisticScorer::new(params, &network_graph, &logger);
                let sender = sender_node_id();
                let source = source_node_id();
                let failed_path = payment_path_for_amount(500);
@@ -2078,12 +2145,13 @@ mod tests {
 
        #[test]
        fn sets_liquidity_lower_bound_on_downstream_failure() {
+               let logger = TestLogger::new();
                let network_graph = network_graph();
                let params = ProbabilisticScoringParameters {
                        liquidity_penalty_multiplier_msat: 1_000,
                        ..ProbabilisticScoringParameters::zero_penalty()
                };
-               let mut scorer = ProbabilisticScorer::new(params, &network_graph);
+               let mut scorer = ProbabilisticScorer::new(params, &network_graph, &logger);
                let source = source_node_id();
                let target = target_node_id();
                let path = payment_path_for_amount(500);
@@ -2101,12 +2169,13 @@ mod tests {
 
        #[test]
        fn sets_liquidity_upper_bound_on_failure() {
+               let logger = TestLogger::new();
                let network_graph = network_graph();
                let params = ProbabilisticScoringParameters {
                        liquidity_penalty_multiplier_msat: 1_000,
                        ..ProbabilisticScoringParameters::zero_penalty()
                };
-               let mut scorer = ProbabilisticScorer::new(params, &network_graph);
+               let mut scorer = ProbabilisticScorer::new(params, &network_graph, &logger);
                let source = source_node_id();
                let target = target_node_id();
                let path = payment_path_for_amount(500);
@@ -2124,12 +2193,13 @@ mod tests {
 
        #[test]
        fn reduces_liquidity_upper_bound_along_path_on_success() {
+               let logger = TestLogger::new();
                let network_graph = network_graph();
                let params = ProbabilisticScoringParameters {
                        liquidity_penalty_multiplier_msat: 1_000,
                        ..ProbabilisticScoringParameters::zero_penalty()
                };
-               let mut scorer = ProbabilisticScorer::new(params, &network_graph);
+               let mut scorer = ProbabilisticScorer::new(params, &network_graph, &logger);
                let sender = sender_node_id();
                let source = source_node_id();
                let target = target_node_id();
@@ -2149,13 +2219,14 @@ mod tests {
 
        #[test]
        fn decays_liquidity_bounds_over_time() {
+               let logger = TestLogger::new();
                let network_graph = network_graph();
                let params = ProbabilisticScoringParameters {
                        liquidity_penalty_multiplier_msat: 1_000,
                        liquidity_offset_half_life: Duration::from_secs(10),
                        ..ProbabilisticScoringParameters::zero_penalty()
                };
-               let mut scorer = ProbabilisticScorer::new(params, &network_graph);
+               let mut scorer = ProbabilisticScorer::new(params, &network_graph, &logger);
                let source = source_node_id();
                let target = target_node_id();
 
@@ -2201,13 +2272,14 @@ mod tests {
 
        #[test]
        fn decays_liquidity_bounds_without_shift_overflow() {
+               let logger = TestLogger::new();
                let network_graph = network_graph();
                let params = ProbabilisticScoringParameters {
                        liquidity_penalty_multiplier_msat: 1_000,
                        liquidity_offset_half_life: Duration::from_secs(10),
                        ..ProbabilisticScoringParameters::zero_penalty()
                };
-               let mut scorer = ProbabilisticScorer::new(params, &network_graph);
+               let mut scorer = ProbabilisticScorer::new(params, &network_graph, &logger);
                let source = source_node_id();
                let target = target_node_id();
                assert_eq!(scorer.channel_penalty_msat(42, 256, 1_024, &source, &target), 125);
@@ -2226,13 +2298,14 @@ mod tests {
 
        #[test]
        fn restricts_liquidity_bounds_after_decay() {
+               let logger = TestLogger::new();
                let network_graph = network_graph();
                let params = ProbabilisticScoringParameters {
                        liquidity_penalty_multiplier_msat: 1_000,
                        liquidity_offset_half_life: Duration::from_secs(10),
                        ..ProbabilisticScoringParameters::zero_penalty()
                };
-               let mut scorer = ProbabilisticScorer::new(params, &network_graph);
+               let mut scorer = ProbabilisticScorer::new(params, &network_graph, &logger);
                let source = source_node_id();
                let target = target_node_id();
 
@@ -2264,13 +2337,14 @@ mod tests {
 
        #[test]
        fn restores_persisted_liquidity_bounds() {
+               let logger = TestLogger::new();
                let network_graph = network_graph();
                let params = ProbabilisticScoringParameters {
                        liquidity_penalty_multiplier_msat: 1_000,
                        liquidity_offset_half_life: Duration::from_secs(10),
                        ..ProbabilisticScoringParameters::zero_penalty()
                };
-               let mut scorer = ProbabilisticScorer::new(params, &network_graph);
+               let mut scorer = ProbabilisticScorer::new(params, &network_graph, &logger);
                let source = source_node_id();
                let target = target_node_id();
 
@@ -2288,19 +2362,20 @@ mod tests {
 
                let mut serialized_scorer = io::Cursor::new(&serialized_scorer);
                let deserialized_scorer =
-                       <ProbabilisticScorer>::read(&mut serialized_scorer, (params, &network_graph)).unwrap();
+                       <ProbabilisticScorer>::read(&mut serialized_scorer, (params, &network_graph, &logger)).unwrap();
                assert_eq!(deserialized_scorer.channel_penalty_msat(42, 500, 1_000, &source, &target), 300);
        }
 
        #[test]
        fn decays_persisted_liquidity_bounds() {
+               let logger = TestLogger::new();
                let network_graph = network_graph();
                let params = ProbabilisticScoringParameters {
                        liquidity_penalty_multiplier_msat: 1_000,
                        liquidity_offset_half_life: Duration::from_secs(10),
                        ..ProbabilisticScoringParameters::zero_penalty()
                };
-               let mut scorer = ProbabilisticScorer::new(params, &network_graph);
+               let mut scorer = ProbabilisticScorer::new(params, &network_graph, &logger);
                let source = source_node_id();
                let target = target_node_id();
 
@@ -2314,7 +2389,7 @@ mod tests {
 
                let mut serialized_scorer = io::Cursor::new(&serialized_scorer);
                let deserialized_scorer =
-                       <ProbabilisticScorer>::read(&mut serialized_scorer, (params, &network_graph)).unwrap();
+                       <ProbabilisticScorer>::read(&mut serialized_scorer, (params, &network_graph, &logger)).unwrap();
                assert_eq!(deserialized_scorer.channel_penalty_msat(42, 500, 1_000, &source, &target), 473);
 
                scorer.payment_path_failed(&payment_path_for_amount(250).iter().collect::<Vec<_>>(), 43);
@@ -2328,9 +2403,10 @@ mod tests {
        fn scores_realistic_payments() {
                // Shows the scores of "realistic" sends of 100k sats over channels of 1-10m sats (with a
                // 50k sat reserve).
+               let logger = TestLogger::new();
                let network_graph = network_graph();
                let params = ProbabilisticScoringParameters::default();
-               let scorer = ProbabilisticScorer::new(params, &network_graph);
+               let scorer = ProbabilisticScorer::new(params, &network_graph, &logger);
                let source = source_node_id();
                let target = target_node_id();
 
@@ -2349,6 +2425,7 @@ mod tests {
 
        #[test]
        fn adds_base_penalty_to_liquidity_penalty() {
+               let logger = TestLogger::new();
                let network_graph = network_graph();
                let source = source_node_id();
                let target = target_node_id();
@@ -2357,18 +2434,19 @@ mod tests {
                        liquidity_penalty_multiplier_msat: 1_000,
                        ..ProbabilisticScoringParameters::zero_penalty()
                };
-               let scorer = ProbabilisticScorer::new(params, &network_graph);
+               let scorer = ProbabilisticScorer::new(params, &network_graph, &logger);
                assert_eq!(scorer.channel_penalty_msat(42, 128, 1_024, &source, &target), 58);
 
                let params = ProbabilisticScoringParameters {
                        base_penalty_msat: 500, liquidity_penalty_multiplier_msat: 1_000, ..Default::default()
                };
-               let scorer = ProbabilisticScorer::new(params, &network_graph);
+               let scorer = ProbabilisticScorer::new(params, &network_graph, &logger);
                assert_eq!(scorer.channel_penalty_msat(42, 128, 1_024, &source, &target), 558);
        }
 
        #[test]
        fn adds_amount_penalty_to_liquidity_penalty() {
+               let logger = TestLogger::new();
                let network_graph = network_graph();
                let source = source_node_id();
                let target = target_node_id();
@@ -2378,7 +2456,7 @@ mod tests {
                        amount_penalty_multiplier_msat: 0,
                        ..ProbabilisticScoringParameters::zero_penalty()
                };
-               let scorer = ProbabilisticScorer::new(params, &network_graph);
+               let scorer = ProbabilisticScorer::new(params, &network_graph, &logger);
                assert_eq!(scorer.channel_penalty_msat(42, 512_000, 1_024_000, &source, &target), 300);
 
                let params = ProbabilisticScoringParameters {
@@ -2386,12 +2464,13 @@ mod tests {
                        amount_penalty_multiplier_msat: 256,
                        ..ProbabilisticScoringParameters::zero_penalty()
                };
-               let scorer = ProbabilisticScorer::new(params, &network_graph);
+               let scorer = ProbabilisticScorer::new(params, &network_graph, &logger);
                assert_eq!(scorer.channel_penalty_msat(42, 512_000, 1_024_000, &source, &target), 337);
        }
 
        #[test]
        fn calculates_log10_without_overflowing_u64_max_value() {
+               let logger = TestLogger::new();
                let network_graph = network_graph();
                let source = source_node_id();
                let target = target_node_id();
@@ -2400,7 +2479,7 @@ mod tests {
                        liquidity_penalty_multiplier_msat: 40_000,
                        ..ProbabilisticScoringParameters::zero_penalty()
                };
-               let scorer = ProbabilisticScorer::new(params, &network_graph);
+               let scorer = ProbabilisticScorer::new(params, &network_graph, &logger);
                assert_eq!(
                        scorer.channel_penalty_msat(42, u64::max_value(), u64::max_value(), &source, &target),
                        80_000,
index bde547036536b479bcf181740e0ea31a7cf42ebb..482759b8ca88b626fd0585f0e73f84d33a7d967c 100644 (file)
@@ -113,3 +113,5 @@ impl<T> RwLock<T> {
                Err(())
        }
 }
+
+pub type FairRwLock<T> = RwLock<T>;
index bd8b40b66567e8a5346e8e551568aaf1d539eae5..3868d29aab49f66789c49c970a662cba19000e2e 100644 (file)
@@ -10,6 +10,7 @@
 //! Various user-configurable channel limits and settings which ChannelManager
 //! applies for you.
 
+use ln::channel::MAX_FUNDING_SATOSHIS_NO_WUMBO;
 use ln::channelmanager::{BREAKDOWN_TIMEOUT, MAX_LOCAL_BREAKDOWN_TIMEOUT};
 
 /// Configuration we set when applicable.
@@ -47,6 +48,30 @@ pub struct ChannelHandshakeConfig {
        /// Default value: 1. If the value is less than 1, it is ignored and set to 1, as is required
        /// by the protocol.
        pub our_htlc_minimum_msat: u64,
+       /// Sets the percentage of the channel value we will cap the total value of outstanding inbound
+       /// HTLCs to.
+       ///
+       /// This can be set to a value between 1-100, where the value corresponds to the percent of the
+       /// channel value in whole percentages.
+       ///
+       /// Note that:
+       /// * If configured to a value other than the default value of 10, any new channels created with
+       /// the non-default value will cause versions of LDK prior to 0.0.104 to refuse to read the
+       /// `ChannelManager`.
+       ///
+       /// * This caps the total value for inbound HTLCs in-flight only, and there's currently
+       /// no way to configure the cap for the total value of outbound HTLCs in-flight.
+       ///
+       /// * The requirements for your node being online to ensure the safety of HTLC-encumbered funds
+       /// are different from the non-HTLC-encumbered funds. This makes this an important knob to
+       /// restrict exposure to loss due to being offline for too long.
+       /// See [`ChannelHandshakeConfig::our_to_self_delay`] and [`ChannelConfig::cltv_expiry_delta`]
+       /// for more information.
+       ///
+       /// Default value: 10.
+       /// Minimum value: 1, any values less than 1 will be treated as 1 instead.
+       /// Maximum value: 100, any values larger than 100 will be treated as 100 instead.
+       pub max_inbound_htlc_value_in_flight_percent_of_channel: u8,
        /// If set, we attempt to negotiate the `scid_privacy` (referred to as `scid_alias` in the
        /// BOLTs) option for outbound private channels. This provides better privacy by not including
        /// our real on-chain channel UTXO in each invoice and requiring that our counterparty only
@@ -77,6 +102,7 @@ impl Default for ChannelHandshakeConfig {
                        minimum_depth: 6,
                        our_to_self_delay: BREAKDOWN_TIMEOUT,
                        our_htlc_minimum_msat: 1,
+                       max_inbound_htlc_value_in_flight_percent_of_channel: 10,
                        negotiate_scid_privacy: false,
                }
        }
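A hedged sketch of opting into the new knob (struct and field names as documented above; 25 is only an example value):

    let handshake_config = ChannelHandshakeConfig {
        // Cap inbound in-flight HTLCs at 25% of the channel value instead of the 10% default.
        max_inbound_htlc_value_in_flight_percent_of_channel: 25,
        ..Default::default()
    };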
@@ -95,11 +121,16 @@ impl Default for ChannelHandshakeConfig {
 /// are applied mostly only to incoming channels that's not much of a problem.
 #[derive(Copy, Clone, Debug)]
 pub struct ChannelHandshakeLimits {
-       /// Minimum allowed satoshis when a channel is funded, this is supplied by the sender and so
+       /// Minimum allowed satoshis when a channel is funded. This is supplied by the sender and so
        /// only applies to inbound channels.
        ///
        /// Default value: 0.
        pub min_funding_satoshis: u64,
+       /// Maximum allowed satoshis when a channel is funded. This is supplied by the sender and so
+       /// only applies to inbound channels.
+       ///
+       /// Default value: 2^24 - 1.
+       pub max_funding_satoshis: u64,
        /// The remote node sets a limit on the minimum size of HTLCs we can send to them. This allows
        /// you to limit the maximum minimum-size they can require.
        ///
@@ -151,6 +182,7 @@ impl Default for ChannelHandshakeLimits {
        fn default() -> Self {
                ChannelHandshakeLimits {
                        min_funding_satoshis: 0,
+                       max_funding_satoshis: MAX_FUNDING_SATOSHIS_NO_WUMBO,
                        max_htlc_minimum_msat: <u64>::max_value(),
                        min_max_htlc_value_in_flight_msat: 0,
                        max_channel_reserve_satoshis: <u64>::max_value(),
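
The two new handshake knobs above (the in-flight inbound HTLC percentage cap and the inbound
funding ceiling) are plain struct fields, so configuring them is a one-liner each. A minimal,
hedged sketch follows; the `UserConfig` field names (`own_channel_config`,
`peer_channel_config_limits`) are assumed to match the layout current at the time of this change
and are not themselves part of the diff:

    use lightning::util::config::UserConfig;

    fn example_config() -> UserConfig {
        let mut config = UserConfig::default();
        // Cap inbound in-flight HTLCs at 25% of the channel value rather than the default 10%.
        config.own_channel_config.max_inbound_htlc_value_in_flight_percent_of_channel = 25;
        // Refuse inbound channels funded with more than 0.5 BTC.
        config.peer_channel_config_limits.max_funding_satoshis = 50_000_000;
        config
    }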
index 300ddacb020566dac6fc77ac537077918b76e925..2f2d33b29f7ea53c0a2dda084ac7a139583c6345 100644 (file)
@@ -1,7 +1,7 @@
 use bitcoin::hashes::{Hash, HashEngine};
 use bitcoin::hashes::hmac::{Hmac, HmacEngine};
 use bitcoin::hashes::sha256::Hash as Sha256;
-use bitcoin::secp256k1::{Message, Secp256k1, SecretKey, Signature, Signing};
+use bitcoin::secp256k1::{Message, Secp256k1, SecretKey, ecdsa::Signature, Signing};
 
 macro_rules! hkdf_extract_expand {
        ($salt: expr, $ikm: expr) => {{
@@ -41,8 +41,8 @@ pub fn hkdf_extract_expand_thrice(salt: &[u8], ikm: &[u8]) -> ([u8; 32], [u8; 32
 #[inline]
 pub fn sign<C: Signing>(ctx: &Secp256k1<C>, msg: &Message, sk: &SecretKey) -> Signature {
        #[cfg(feature = "grind_signatures")]
-       let sig = ctx.sign_low_r(msg, sk);
+       let sig = ctx.sign_ecdsa_low_r(msg, sk);
        #[cfg(not(feature = "grind_signatures"))]
-       let sig = ctx.sign(msg, sk);
+       let sig = ctx.sign_ecdsa(msg, sk);
        sig
 }
index 2e22df7c5af10c1c4e68dc9ec6baf40990ab7840..b4b66e8f5feb97d52ff52be9e046b55aa71bd532 100644 (file)
@@ -16,12 +16,12 @@ use core::cmp;
 use sync::{Mutex, Arc};
 #[cfg(test)] use sync::MutexGuard;
 
-use bitcoin::blockdata::transaction::{Transaction, SigHashType};
-use bitcoin::util::bip143;
+use bitcoin::blockdata::transaction::{Transaction, EcdsaSighashType};
+use bitcoin::util::sighash;
 
 use bitcoin::secp256k1;
-use bitcoin::secp256k1::key::{SecretKey, PublicKey};
-use bitcoin::secp256k1::{Secp256k1, Signature};
+use bitcoin::secp256k1::{SecretKey, PublicKey};
+use bitcoin::secp256k1::{Secp256k1, ecdsa::Signature};
 use util::ser::{Writeable, Writer};
 use io::Error;
 
@@ -160,8 +160,8 @@ impl BaseSign for EnforcingSigner {
 
                        let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&this_htlc, self.opt_anchors(), &keys);
 
-                       let sighash = hash_to_message!(&bip143::SigHashCache::new(&htlc_tx).signature_hash(0, &htlc_redeemscript, this_htlc.amount_msat / 1000, SigHashType::All)[..]);
-                       secp_ctx.verify(&sighash, sig, &keys.countersignatory_htlc_key).unwrap();
+                       let sighash = hash_to_message!(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, this_htlc.amount_msat / 1000, EcdsaSighashType::All).unwrap()[..]);
+                       secp_ctx.verify_ecdsa(&sighash, sig, &keys.countersignatory_htlc_key).unwrap();
                }
 
                Ok(self.inner.sign_holder_commitment_and_htlcs(commitment_tx, secp_ctx).unwrap())
index b97afd4ccd3e3ccb4150ec20425a24dde88dcf81..5953ee68940e61472f4e2acc2665dd89a0f71c98 100644 (file)
@@ -29,7 +29,7 @@ use bitcoin::Transaction;
 use bitcoin::blockdata::script::Script;
 use bitcoin::hashes::Hash;
 use bitcoin::hashes::sha256::Hash as Sha256;
-use bitcoin::secp256k1::key::PublicKey;
+use bitcoin::secp256k1::PublicKey;
 use io;
 use prelude::*;
 use core::time::Duration;
@@ -100,12 +100,11 @@ pub enum ClosureReason {
                /// A developer-readable error message which we generated.
                err: String,
        },
-       /// The `PeerManager` informed us that we've disconnected from the peer. We close channels
-       /// if the `PeerManager` informed us that it is unlikely we'll be able to connect to the
-       /// peer again in the future or if the peer disconnected before we finished negotiating
-       /// the channel open. The first case may be caused by incompatible features which our
-       /// counterparty, or we, require.
-       //TODO: split between PeerUnconnectable/PeerDisconnected ?
+       /// The peer disconnected prior to funding completing. In this case the spec mandates that we
+       /// forget the channel entirely - we can attempt again if the peer reconnects.
+       ///
+       /// In LDK versions prior to 0.0.107 this could also occur if we were unable to connect to the
+       /// peer because of mutual incompatibility between us and our channel counterparty.
        DisconnectedPeer,
        /// Closure generated from `ChannelManager::read` if the ChannelMonitor is newer than
        /// the ChannelManager deserialized.
@@ -230,6 +229,47 @@ pub enum Event {
                /// [`Route::get_total_fees`]: crate::routing::router::Route::get_total_fees
                fee_paid_msat: Option<u64>,
        },
+       /// Indicates an outbound payment failed. Individual [`Event::PaymentPathFailed`] events
+       /// provide failure information for each MPP part in the payment.
+       ///
+       /// This event is provided once there are no further pending HTLCs for the payment and the
+       /// payment is no longer retryable, either due to a several-block timeout or because
+       /// [`ChannelManager::abandon_payment`] was previously called for the corresponding payment.
+       ///
+       /// [`ChannelManager::abandon_payment`]: crate::ln::channelmanager::ChannelManager::abandon_payment
+       PaymentFailed {
+               /// The id returned by [`ChannelManager::send_payment`] and used with
+               /// [`ChannelManager::retry_payment`] and [`ChannelManager::abandon_payment`].
+               ///
+               /// [`ChannelManager::send_payment`]: crate::ln::channelmanager::ChannelManager::send_payment
+               /// [`ChannelManager::retry_payment`]: crate::ln::channelmanager::ChannelManager::retry_payment
+               /// [`ChannelManager::abandon_payment`]: crate::ln::channelmanager::ChannelManager::abandon_payment
+               payment_id: PaymentId,
+               /// The hash that was given to [`ChannelManager::send_payment`].
+               ///
+               /// [`ChannelManager::send_payment`]: crate::ln::channelmanager::ChannelManager::send_payment
+               payment_hash: PaymentHash,
+       },
+       /// Indicates that a path for an outbound payment was successful.
+       ///
+       /// Always generated after [`Event::PaymentSent`] and thus useful for scoring channels. See
+       /// [`Event::PaymentSent`] for obtaining the payment preimage.
+       PaymentPathSuccessful {
+               /// The id returned by [`ChannelManager::send_payment`] and used with
+               /// [`ChannelManager::retry_payment`].
+               ///
+               /// [`ChannelManager::send_payment`]: crate::ln::channelmanager::ChannelManager::send_payment
+               /// [`ChannelManager::retry_payment`]: crate::ln::channelmanager::ChannelManager::retry_payment
+               payment_id: PaymentId,
+               /// The hash that was given to [`ChannelManager::send_payment`].
+               ///
+               /// [`ChannelManager::send_payment`]: crate::ln::channelmanager::ChannelManager::send_payment
+               payment_hash: Option<PaymentHash>,
+               /// The payment path that was successful.
+               ///
+               /// May contain a closed channel if the HTLC sent along the path was fulfilled on chain.
+               path: Vec<RouteHop>,
+       },
        /// Indicates an outbound HTLC we sent failed. Probably some intermediary node dropped
        /// something. You may wish to retry with a different route.
        ///
@@ -299,27 +339,6 @@ pub enum Event {
 #[cfg(test)]
                error_data: Option<Vec<u8>>,
        },
-       /// Indicates an outbound payment failed. Individual [`Event::PaymentPathFailed`] events
-       /// provide failure information for each MPP part in the payment.
-       ///
-       /// This event is provided once there are no further pending HTLCs for the payment and the
-       /// payment is no longer retryable, either due to a several-block timeout or because
-       /// [`ChannelManager::abandon_payment`] was previously called for the corresponding payment.
-       ///
-       /// [`ChannelManager::abandon_payment`]: crate::ln::channelmanager::ChannelManager::abandon_payment
-       PaymentFailed {
-               /// The id returned by [`ChannelManager::send_payment`] and used with
-               /// [`ChannelManager::retry_payment`] and [`ChannelManager::abandon_payment`].
-               ///
-               /// [`ChannelManager::send_payment`]: crate::ln::channelmanager::ChannelManager::send_payment
-               /// [`ChannelManager::retry_payment`]: crate::ln::channelmanager::ChannelManager::retry_payment
-               /// [`ChannelManager::abandon_payment`]: crate::ln::channelmanager::ChannelManager::abandon_payment
-               payment_id: PaymentId,
-               /// The hash that was given to [`ChannelManager::send_payment`].
-               ///
-               /// [`ChannelManager::send_payment`]: crate::ln::channelmanager::ChannelManager::send_payment
-               payment_hash: PaymentHash,
-       },
        /// Used to indicate that [`ChannelManager::process_pending_htlc_forwards`] should be called at
        /// a time in the future.
        ///
@@ -393,26 +412,6 @@ pub enum Event {
                /// The full transaction received from the user
                transaction: Transaction
        },
-       /// Indicates that a path for an outbound payment was successful.
-       ///
-       /// Always generated after [`Event::PaymentSent`] and thus useful for scoring channels. See
-       /// [`Event::PaymentSent`] for obtaining the payment preimage.
-       PaymentPathSuccessful {
-               /// The id returned by [`ChannelManager::send_payment`] and used with
-               /// [`ChannelManager::retry_payment`].
-               ///
-               /// [`ChannelManager::send_payment`]: crate::ln::channelmanager::ChannelManager::send_payment
-               /// [`ChannelManager::retry_payment`]: crate::ln::channelmanager::ChannelManager::retry_payment
-               payment_id: PaymentId,
-               /// The hash that was given to [`ChannelManager::send_payment`].
-               ///
-               /// [`ChannelManager::send_payment`]: crate::ln::channelmanager::ChannelManager::send_payment
-               payment_hash: Option<PaymentHash>,
-               /// The payment path that was successful.
-               ///
-               /// May contain a closed channel if the HTLC sent along the path was fulfilled on chain.
-               path: Vec<RouteHop>,
-       },
        /// Indicates a request to open a new channel by a peer.
        ///
        /// To accept the request, call [`ChannelManager::accept_inbound_channel`]. To reject the
diff --git a/lightning/src/util/fairrwlock.rs b/lightning/src/util/fairrwlock.rs
new file mode 100644 (file)
index 0000000..c9b3866
--- /dev/null
@@ -0,0 +1,46 @@
+use std::sync::{LockResult, RwLock, RwLockReadGuard, RwLockWriteGuard};
+use std::sync::atomic::{AtomicUsize, Ordering};
+
+/// Rust libstd's RwLock does not provide any fairness guarantees (and, in fact, when used on
+/// Linux with pthreads under the hood, readers trivially and completely starve writers).
+/// Because we often hold read locks while doing message processing in multiple threads which
+/// can use significant CPU time, with write locks being time-sensitive but relatively small in
+/// CPU time, we can end up with starvation completely blocking incoming connections or pings,
+/// especially during initial graph sync.
+///
+/// Thus, we need to block readers when a writer is pending, which we do with a trivial RwLock
+/// wrapper here. It's not particularly optimized, but provides some reasonable fairness by
+/// blocking readers (by taking the write lock) if there are writers pending when we go to take
+/// a read lock.
+pub struct FairRwLock<T> {
+       lock: RwLock<T>,
+       waiting_writers: AtomicUsize,
+}
+
+impl<T> FairRwLock<T> {
+       pub fn new(t: T) -> Self {
+               Self { lock: RwLock::new(t), waiting_writers: AtomicUsize::new(0) }
+       }
+
+       // Note that all atomic accesses are relaxed, as we do not rely on the atomics here for any
+       // ordering at all, instead relying on the underlying RwLock to provide ordering of unrelated
+       // memory.
+       pub fn write(&self) -> LockResult<RwLockWriteGuard<T>> {
+               self.waiting_writers.fetch_add(1, Ordering::Relaxed);
+               let res = self.lock.write();
+               self.waiting_writers.fetch_sub(1, Ordering::Relaxed);
+               res
+       }
+
+       pub fn read(&self) -> LockResult<RwLockReadGuard<T>> {
+               if self.waiting_writers.load(Ordering::Relaxed) != 0 {
+                       let _write_queue_lock = self.lock.write();
+               }
+               // Note that we make no attempt to guard against the opposite problem, an
+               // underlying RwLock that allows writers to starve readers. I'm not aware of any
+               // libstd-backed RwLock which exhibits that behavior, and as documented in the
+               // struct-level documentation, it shouldn't pose a significant issue for our
+               // current codebase.
+               self.lock.read()
+       }
+}
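
A hedged, crate-internal usage sketch of the wrapper above (the module is only `pub(crate)`, per
the `util/mod.rs` hunk below, so this is not a public API):

    // From another module inside the `lightning` crate:
    use util::fairrwlock::FairRwLock;

    let peers = FairRwLock::new(Vec::<u64>::new());
    {
        // read() first checks waiting_writers and briefly grabs the write lock if a writer
        // is queued, so a pending writer is not starved by a stream of readers.
        let read_guard = peers.read().unwrap();
        assert!(read_guard.is_empty());
    }
    peers.write().unwrap().push(42);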
index 2dd73d8279cff447576817450601c85112adce74..eadfdbdfc67aaeea13477dc7b5315df4f4ab42cd 100644 (file)
@@ -12,7 +12,7 @@ use chain::keysinterface::SpendableOutputDescriptor;
 
 use bitcoin::hash_types::Txid;
 use bitcoin::blockdata::transaction::Transaction;
-use bitcoin::secp256k1::key::PublicKey;
+use bitcoin::secp256k1::PublicKey;
 
 use routing::router::Route;
 use ln::chan_utils::HTLCType;
index 8beff835a4bffe1f0b3d59e672380323b6164f98..593b01dff90d9b5a36bad535a2269a5e73a7c496 100644 (file)
@@ -23,7 +23,7 @@
 use prelude::*;
 use crate::util::zbase32;
 use bitcoin::hashes::{sha256d, Hash};
-use bitcoin::secp256k1::recovery::{RecoverableSignature, RecoveryId};
+use bitcoin::secp256k1::ecdsa::{RecoverableSignature, RecoveryId};
 use bitcoin::secp256k1::{Error, Message, PublicKey, Secp256k1, SecretKey};
 
 static LN_MESSAGE_PREFIX: &[u8] = b"Lightning Signed Message:";
@@ -57,7 +57,7 @@ pub fn sign(msg: &[u8], sk: &SecretKey) -> Result<String, Error> {
     let secp_ctx = Secp256k1::signing_only();
     let msg_hash = sha256d::Hash::hash(&[LN_MESSAGE_PREFIX, msg].concat());
 
-    let sig = secp_ctx.sign_recoverable(&Message::from_slice(&msg_hash)?, sk);
+    let sig = secp_ctx.sign_ecdsa_recoverable(&Message::from_slice(&msg_hash)?, sk);
     Ok(zbase32::encode(&sigrec_encode(sig)))
 }
 
@@ -69,7 +69,7 @@ pub fn recover_pk(msg: &[u8], sig: &str) ->  Result<PublicKey, Error> {
     match zbase32::decode(&sig) {
         Ok(sig_rec) => {
             match sigrec_decode(sig_rec) {
-                Ok(sig) => secp_ctx.recover(&Message::from_slice(&msg_hash)?, &sig),
+                Ok(sig) => secp_ctx.recover_ecdsa(&Message::from_slice(&msg_hash)?, &sig),
                 Err(e) => Err(e)
             }
         },
@@ -90,7 +90,7 @@ pub fn verify(msg: &[u8], sig: &str, pk: &PublicKey) -> bool {
 mod test {
     use core::str::FromStr;
     use util::message_signing::{sign, recover_pk, verify};
-    use bitcoin::secp256k1::key::ONE_KEY;
+    use bitcoin::secp256k1::ONE_KEY;
     use bitcoin::secp256k1::{PublicKey, Secp256k1};
 
     #[test]
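
The helpers touched above keep their public `message_signing` signatures; only the underlying
secp256k1 calls are renamed. A hedged round-trip sketch (the fixed key bytes are placeholders):

    use bitcoin::secp256k1::{PublicKey, Secp256k1, SecretKey};
    use lightning::util::message_signing::{recover_pk, sign, verify};

    let secp_ctx = Secp256k1::new();
    let sk = SecretKey::from_slice(&[0x42; 32]).unwrap();
    let pk = PublicKey::from_secret_key(&secp_ctx, &sk);

    // Sign, verify, and recover the public key from the zbase32-encoded signature.
    let sig = sign(b"hello world", &sk).unwrap();
    assert!(verify(b"hello world", &sig, &pk));
    assert_eq!(recover_pk(b"hello world", &sig).unwrap(), pk);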
index 95826b7e06ee73e0a02b3e1b36cf8ac6ef9ed12e..b7ee02d2c1f5724ba0b0b6309ce2eb35ad321c1d 100644 (file)
@@ -25,6 +25,8 @@ pub mod persist;
 pub(crate) mod atomic_counter;
 pub(crate) mod byte_utils;
 pub(crate) mod chacha20;
+#[cfg(all(not(test), feature = "std"))]
+pub(crate) mod fairrwlock;
 #[cfg(fuzzing)]
 pub mod zbase32;
 #[cfg(not(fuzzing))]
index 9476331c15618fb3cf099b7b875dee8e2ec18cd0..5c124c21afdb25984d9d43362726f858fb292c8b 100644 (file)
@@ -11,6 +11,7 @@
 use core::ops::Deref;
 use bitcoin::hashes::hex::ToHex;
 use io::{self};
+use routing::scoring::WriteableScore;
 
 use crate::{chain::{keysinterface::{Sign, KeysInterface}, self, transaction::{OutPoint}, chaininterface::{BroadcasterInterface, FeeEstimator}, chainmonitor::{Persist, MonitorUpdateId}, channelmonitor::{ChannelMonitor, ChannelMonitorUpdate}}, ln::channelmanager::ChannelManager, routing::network_graph::NetworkGraph};
 use super::{logger::Logger, ser::Writeable};
@@ -24,37 +25,47 @@ pub trait KVStorePersister {
        fn persist<W: Writeable>(&self, key: &str, object: &W) -> io::Result<()>;
 }
 
-/// Trait that handles persisting a [`ChannelManager`] and [`NetworkGraph`] to disk.
-pub trait Persister<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
+/// Trait that handles persisting a [`ChannelManager`], [`NetworkGraph`], and [`WriteableScore`] to disk.
+pub trait Persister<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref, S>
        where M::Target: 'static + chain::Watch<Signer>,
                T::Target: 'static + BroadcasterInterface,
                K::Target: 'static + KeysInterface<Signer = Signer>,
                F::Target: 'static + FeeEstimator,
                L::Target: 'static + Logger,
+               S: WriteableScore<'a>,
 {
        /// Persist the given ['ChannelManager'] to disk, returning an error if persistence failed.
        fn persist_manager(&self, channel_manager: &ChannelManager<Signer, M, T, K, F, L>) -> Result<(), io::Error>;
 
        /// Persist the given [`NetworkGraph`] to disk, returning an error if persistence failed.
        fn persist_graph(&self, network_graph: &NetworkGraph) -> Result<(), io::Error>;
+
+       /// Persist the given [`WriteableScore`] to disk, returning an error if persistence failed.
+       fn persist_scorer(&self, scorer: &S) -> Result<(), io::Error>;
 }
 
-impl<A: KVStorePersister, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> Persister<Signer, M, T, K, F, L> for A
+impl<'a, A: KVStorePersister, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref, S> Persister<'a, Signer, M, T, K, F, L, S> for A
        where M::Target: 'static + chain::Watch<Signer>,
                T::Target: 'static + BroadcasterInterface,
                K::Target: 'static + KeysInterface<Signer = Signer>,
                F::Target: 'static + FeeEstimator,
                L::Target: 'static + Logger,
+               S: WriteableScore<'a>,
 {
-       /// Persist the given ['ChannelManager'] to disk, returning an error if persistence failed.
+       /// Persist the given [`ChannelManager`] to disk with the name "manager", returning an error if persistence failed.
        fn persist_manager(&self, channel_manager: &ChannelManager<Signer, M, T, K, F, L>) -> Result<(), io::Error> {
                self.persist("manager", channel_manager)
        }
 
-       /// Persist the given [`NetworkGraph`] to disk, returning an error if persistence failed.
+       /// Persist the given [`NetworkGraph`] to disk with the name "network_graph", returning an error if persistence failed.
        fn persist_graph(&self, network_graph: &NetworkGraph) -> Result<(), io::Error> {
                self.persist("network_graph", network_graph)
        }
+
+       /// Persist the given [`WriteableScore`] to disk with the name "scorer", returning an error if persistence failed.
+       fn persist_scorer(&self, scorer: &S) -> Result<(), io::Error> {
+               self.persist("scorer", &scorer)
+       }
 }
 
 impl<ChannelSigner: Sign, K: KVStorePersister> Persist<ChannelSigner> for K {
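
Because of the blanket impl above, any `KVStorePersister` automatically provides
`persist_manager`, `persist_graph`, and the new `persist_scorer`. A hedged sketch of a trivial
filesystem-backed implementation (the naive write, with no temp-file-and-rename step, is an
assumption made for brevity):

    use std::fs;
    use std::io;
    use std::path::PathBuf;

    use lightning::util::persist::KVStorePersister;
    use lightning::util::ser::Writeable;

    struct FsPersister { dir: PathBuf }

    impl KVStorePersister for FsPersister {
        fn persist<W: Writeable>(&self, key: &str, object: &W) -> io::Result<()> {
            fs::create_dir_all(&self.dir)?;
            // Writeable::encode serializes the object to bytes.
            fs::write(self.dir.join(key), object.encode())
        }
    }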
index de12d8506f8d2da87becade08ac82514e30dc6d0..69fd14640ae605bc2d481f40760ee48d3a502eb6 100644 (file)
@@ -17,9 +17,9 @@ use core::hash::Hash;
 use sync::Mutex;
 use core::cmp;
 
-use bitcoin::secp256k1::Signature;
-use bitcoin::secp256k1::key::{PublicKey, SecretKey};
+use bitcoin::secp256k1::{PublicKey, SecretKey};
 use bitcoin::secp256k1::constants::{PUBLIC_KEY_SIZE, SECRET_KEY_SIZE, COMPACT_SIGNATURE_SIZE};
+use bitcoin::secp256k1::ecdsa::Signature;
 use bitcoin::blockdata::script::Script;
 use bitcoin::blockdata::transaction::{OutPoint, Transaction, TxOut};
 use bitcoin::consensus;
index 0ff0fc31aff93369c7836097ba3eb2a9844ef4dd..3682a0e8f0af02d1ac654853bff16e3096f4025a 100644 (file)
@@ -18,7 +18,7 @@ use chain::channelmonitor::MonitorEvent;
 use chain::transaction::OutPoint;
 use chain::keysinterface;
 use ln::features::{ChannelFeatures, InitFeatures};
-use ln::msgs;
+use ln::{msgs, wire};
 use ln::msgs::OptionalField;
 use ln::script::ShutdownScript;
 use routing::scoring::FixedPenaltyScorer;
@@ -35,8 +35,8 @@ use bitcoin::blockdata::block::BlockHeader;
 use bitcoin::network::constants::Network;
 use bitcoin::hash_types::{BlockHash, Txid};
 
-use bitcoin::secp256k1::{SecretKey, PublicKey, Secp256k1, Signature};
-use bitcoin::secp256k1::recovery::RecoverableSignature;
+use bitcoin::secp256k1::{SecretKey, PublicKey, Secp256k1, ecdsa::Signature};
+use bitcoin::secp256k1::ecdsa::RecoverableSignature;
 
 use regex;
 
@@ -49,6 +49,9 @@ use core::{cmp, mem};
 use bitcoin::bech32::u5;
 use chain::keysinterface::{InMemorySigner, Recipient, KeyMaterial};
 
+#[cfg(feature = "std")]
+use std::time::{SystemTime, UNIX_EPOCH};
+
 pub struct TestVecWriter(pub Vec<u8>);
 impl Writer for TestVecWriter {
        fn write_all(&mut self, buf: &[u8]) -> Result<(), io::Error> {
@@ -246,37 +249,106 @@ impl chaininterface::BroadcasterInterface for TestBroadcaster {
 
 pub struct TestChannelMessageHandler {
        pub pending_events: Mutex<Vec<events::MessageSendEvent>>,
+       expected_recv_msgs: Mutex<Option<Vec<wire::Message<()>>>>,
 }
 
 impl TestChannelMessageHandler {
        pub fn new() -> Self {
                TestChannelMessageHandler {
                        pending_events: Mutex::new(Vec::new()),
+                       expected_recv_msgs: Mutex::new(None),
+               }
+       }
+
+       #[cfg(test)]
+       pub(crate) fn expect_receive_msg(&self, ev: wire::Message<()>) {
+               let mut expected_msgs = self.expected_recv_msgs.lock().unwrap();
+               if expected_msgs.is_none() { *expected_msgs = Some(Vec::new()); }
+               expected_msgs.as_mut().unwrap().push(ev);
+       }
+
+       fn received_msg(&self, _ev: wire::Message<()>) {
+               let mut msgs = self.expected_recv_msgs.lock().unwrap();
+               if msgs.is_none() { return; }
+               assert!(!msgs.as_ref().unwrap().is_empty(), "Received message when we weren't expecting one");
+               #[cfg(test)]
+               assert_eq!(msgs.as_ref().unwrap()[0], _ev);
+               msgs.as_mut().unwrap().remove(0);
+       }
+}
+
+impl Drop for TestChannelMessageHandler {
+       fn drop(&mut self) {
+               let l = self.expected_recv_msgs.lock().unwrap();
+               #[cfg(feature = "std")]
+               {
+                       if !std::thread::panicking() {
+                               assert!(l.is_none() || l.as_ref().unwrap().is_empty());
+                       }
                }
        }
 }
 
 impl msgs::ChannelMessageHandler for TestChannelMessageHandler {
-       fn handle_open_channel(&self, _their_node_id: &PublicKey, _their_features: InitFeatures, _msg: &msgs::OpenChannel) {}
-       fn handle_accept_channel(&self, _their_node_id: &PublicKey, _their_features: InitFeatures, _msg: &msgs::AcceptChannel) {}
-       fn handle_funding_created(&self, _their_node_id: &PublicKey, _msg: &msgs::FundingCreated) {}
-       fn handle_funding_signed(&self, _their_node_id: &PublicKey, _msg: &msgs::FundingSigned) {}
-       fn handle_funding_locked(&self, _their_node_id: &PublicKey, _msg: &msgs::FundingLocked) {}
-       fn handle_shutdown(&self, _their_node_id: &PublicKey, _their_features: &InitFeatures, _msg: &msgs::Shutdown) {}
-       fn handle_closing_signed(&self, _their_node_id: &PublicKey, _msg: &msgs::ClosingSigned) {}
-       fn handle_update_add_htlc(&self, _their_node_id: &PublicKey, _msg: &msgs::UpdateAddHTLC) {}
-       fn handle_update_fulfill_htlc(&self, _their_node_id: &PublicKey, _msg: &msgs::UpdateFulfillHTLC) {}
-       fn handle_update_fail_htlc(&self, _their_node_id: &PublicKey, _msg: &msgs::UpdateFailHTLC) {}
-       fn handle_update_fail_malformed_htlc(&self, _their_node_id: &PublicKey, _msg: &msgs::UpdateFailMalformedHTLC) {}
-       fn handle_commitment_signed(&self, _their_node_id: &PublicKey, _msg: &msgs::CommitmentSigned) {}
-       fn handle_revoke_and_ack(&self, _their_node_id: &PublicKey, _msg: &msgs::RevokeAndACK) {}
-       fn handle_update_fee(&self, _their_node_id: &PublicKey, _msg: &msgs::UpdateFee) {}
-       fn handle_channel_update(&self, _their_node_id: &PublicKey, _msg: &msgs::ChannelUpdate) {}
-       fn handle_announcement_signatures(&self, _their_node_id: &PublicKey, _msg: &msgs::AnnouncementSignatures) {}
-       fn handle_channel_reestablish(&self, _their_node_id: &PublicKey, _msg: &msgs::ChannelReestablish) {}
+       fn handle_open_channel(&self, _their_node_id: &PublicKey, _their_features: InitFeatures, msg: &msgs::OpenChannel) {
+               self.received_msg(wire::Message::OpenChannel(msg.clone()));
+       }
+       fn handle_accept_channel(&self, _their_node_id: &PublicKey, _their_features: InitFeatures, msg: &msgs::AcceptChannel) {
+               self.received_msg(wire::Message::AcceptChannel(msg.clone()));
+       }
+       fn handle_funding_created(&self, _their_node_id: &PublicKey, msg: &msgs::FundingCreated) {
+               self.received_msg(wire::Message::FundingCreated(msg.clone()));
+       }
+       fn handle_funding_signed(&self, _their_node_id: &PublicKey, msg: &msgs::FundingSigned) {
+               self.received_msg(wire::Message::FundingSigned(msg.clone()));
+       }
+       fn handle_funding_locked(&self, _their_node_id: &PublicKey, msg: &msgs::FundingLocked) {
+               self.received_msg(wire::Message::FundingLocked(msg.clone()));
+       }
+       fn handle_shutdown(&self, _their_node_id: &PublicKey, _their_features: &InitFeatures, msg: &msgs::Shutdown) {
+               self.received_msg(wire::Message::Shutdown(msg.clone()));
+       }
+       fn handle_closing_signed(&self, _their_node_id: &PublicKey, msg: &msgs::ClosingSigned) {
+               self.received_msg(wire::Message::ClosingSigned(msg.clone()));
+       }
+       fn handle_update_add_htlc(&self, _their_node_id: &PublicKey, msg: &msgs::UpdateAddHTLC) {
+               self.received_msg(wire::Message::UpdateAddHTLC(msg.clone()));
+       }
+       fn handle_update_fulfill_htlc(&self, _their_node_id: &PublicKey, msg: &msgs::UpdateFulfillHTLC) {
+               self.received_msg(wire::Message::UpdateFulfillHTLC(msg.clone()));
+       }
+       fn handle_update_fail_htlc(&self, _their_node_id: &PublicKey, msg: &msgs::UpdateFailHTLC) {
+               self.received_msg(wire::Message::UpdateFailHTLC(msg.clone()));
+       }
+       fn handle_update_fail_malformed_htlc(&self, _their_node_id: &PublicKey, msg: &msgs::UpdateFailMalformedHTLC) {
+               self.received_msg(wire::Message::UpdateFailMalformedHTLC(msg.clone()));
+       }
+       fn handle_commitment_signed(&self, _their_node_id: &PublicKey, msg: &msgs::CommitmentSigned) {
+               self.received_msg(wire::Message::CommitmentSigned(msg.clone()));
+       }
+       fn handle_revoke_and_ack(&self, _their_node_id: &PublicKey, msg: &msgs::RevokeAndACK) {
+               self.received_msg(wire::Message::RevokeAndACK(msg.clone()));
+       }
+       fn handle_update_fee(&self, _their_node_id: &PublicKey, msg: &msgs::UpdateFee) {
+               self.received_msg(wire::Message::UpdateFee(msg.clone()));
+       }
+       fn handle_channel_update(&self, _their_node_id: &PublicKey, _msg: &msgs::ChannelUpdate) {
+               // Don't call `received_msg` here as `TestRoutingMessageHandler` generates these sometimes
+       }
+       fn handle_announcement_signatures(&self, _their_node_id: &PublicKey, msg: &msgs::AnnouncementSignatures) {
+               self.received_msg(wire::Message::AnnouncementSignatures(msg.clone()));
+       }
+       fn handle_channel_reestablish(&self, _their_node_id: &PublicKey, msg: &msgs::ChannelReestablish) {
+               self.received_msg(wire::Message::ChannelReestablish(msg.clone()));
+       }
        fn peer_disconnected(&self, _their_node_id: &PublicKey, _no_connection_possible: bool) {}
-       fn peer_connected(&self, _their_node_id: &PublicKey, _msg: &msgs::Init) {}
-       fn handle_error(&self, _their_node_id: &PublicKey, _msg: &msgs::ErrorMessage) {}
+       fn peer_connected(&self, _their_node_id: &PublicKey, _msg: &msgs::Init) {
+               // Don't call `received_msg` for Init as it's auto-generated and we don't want to
+               // re-generate the expected Init message in every test.
+       }
+       fn handle_error(&self, _their_node_id: &PublicKey, msg: &msgs::ErrorMessage) {
+               self.received_msg(wire::Message::Error(msg.clone()));
+       }
 }
 
 impl events::MessageSendEventsProvider for TestChannelMessageHandler {
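
A hedged sketch of the expectation pattern added above, as a test inside the crate might use it
(the dummy Shutdown message and key are placeholders):

    use bitcoin::blockdata::script::Script;
    use bitcoin::secp256k1::{PublicKey, Secp256k1, SecretKey};
    use ln::features::InitFeatures;
    use ln::msgs::{self, ChannelMessageHandler};
    use ln::wire;

    let handler = TestChannelMessageHandler::new();
    let shutdown = msgs::Shutdown { channel_id: [0; 32], scriptpubkey: Script::new() };
    handler.expect_receive_msg(wire::Message::Shutdown(shutdown.clone()));

    // Delivering the message pops and checks the expectation via received_msg(); dropping
    // `handler` then asserts that nothing expected was left undelivered.
    let node_id = PublicKey::from_secret_key(&Secp256k1::new(), &SecretKey::from_slice(&[7; 32]).unwrap());
    handler.handle_shutdown(&node_id, &InitFeatures::known(), &shutdown);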
@@ -341,6 +413,7 @@ fn get_dummy_channel_update(short_chan_id: u64) -> msgs::ChannelUpdate {
 pub struct TestRoutingMessageHandler {
        pub chan_upds_recvd: AtomicUsize,
        pub chan_anns_recvd: AtomicUsize,
+       pub pending_events: Mutex<Vec<events::MessageSendEvent>>,
        pub request_full_sync: AtomicBool,
 }
 
@@ -349,6 +422,7 @@ impl TestRoutingMessageHandler {
                TestRoutingMessageHandler {
                        chan_upds_recvd: AtomicUsize::new(0),
                        chan_anns_recvd: AtomicUsize::new(0),
+                       pending_events: Mutex::new(vec![]),
                        request_full_sync: AtomicBool::new(false),
                }
        }
@@ -384,7 +458,35 @@ impl msgs::RoutingMessageHandler for TestRoutingMessageHandler {
                Vec::new()
        }
 
-       fn peer_connected(&self, _their_node_id: &PublicKey, _init_msg: &msgs::Init) {}
+       fn peer_connected(&self, their_node_id: &PublicKey, init_msg: &msgs::Init) {
+               if !init_msg.features.supports_gossip_queries() {
+                       return ();
+               }
+
+               let should_request_full_sync = self.request_full_sync.load(Ordering::Acquire);
+
+               #[allow(unused_mut, unused_assignments)]
+               let mut gossip_start_time = 0;
+               #[cfg(feature = "std")]
+               {
+                       gossip_start_time = SystemTime::now().duration_since(UNIX_EPOCH).expect("Time must be > 1970").as_secs();
+                       if should_request_full_sync {
+                               gossip_start_time -= 60 * 60 * 24 * 7 * 2; // 2 weeks ago
+                       } else {
+                               gossip_start_time -= 60 * 60; // an hour ago
+                       }
+               }
+
+               let mut pending_events = self.pending_events.lock().unwrap();
+               pending_events.push(events::MessageSendEvent::SendGossipTimestampFilter {
+                       node_id: their_node_id.clone(),
+                       msg: msgs::GossipTimestampFilter {
+                               chain_hash: genesis_block(Network::Testnet).header.block_hash(),
+                               first_timestamp: gossip_start_time as u32,
+                               timestamp_range: u32::max_value(),
+                       },
+               });
+       }
 
        fn handle_reply_channel_range(&self, _their_node_id: &PublicKey, _msg: msgs::ReplyChannelRange) -> Result<(), msgs::LightningError> {
                Ok(())
@@ -405,7 +507,10 @@ impl msgs::RoutingMessageHandler for TestRoutingMessageHandler {
 
 impl events::MessageSendEventsProvider for TestRoutingMessageHandler {
        fn get_and_clear_pending_msg_events(&self) -> Vec<events::MessageSendEvent> {
-               vec![]
+               let mut ret = Vec::new();
+               let mut pending_events = self.pending_events.lock().unwrap();
+               core::mem::swap(&mut ret, &mut pending_events);
+               ret
        }
 }
 
index 4d444d7c2aba29f20aa8ad6bee4e4483e008024d..12768543783cd148b7874a0009aa6b44650cd71a 100644 (file)
@@ -50,7 +50,7 @@ pub(crate) fn maybe_add_change_output(tx: &mut Transaction, input_value: u64, wi
                value: 0,
        };
        let change_len = change_output.consensus_encode(&mut sink()).unwrap();
-       let starting_weight = tx.get_weight() + WITNESS_FLAG_BYTES as usize + witness_max_weight;
+       let starting_weight = tx.weight() + WITNESS_FLAG_BYTES as usize + witness_max_weight;
        let mut weight_with_change: i64 = starting_weight as i64 + change_len as i64 * 4;
        // Include any extra bytes required to push an extra output.
        weight_with_change += (VarInt(tx.output.len() as u64 + 1).len() - VarInt(tx.output.len() as u64).len()) as i64 * 4;
@@ -77,6 +77,7 @@ mod tests {
 
        use bitcoin::hashes::sha256d::Hash as Sha256dHash;
        use bitcoin::hashes::Hash;
+       use bitcoin::Witness;
 
        use hex::decode;
 
@@ -230,7 +231,7 @@ mod tests {
                let output_spk = Script::new_p2pkh(&PubkeyHash::hash(&[0; 0]));
                assert_eq!(output_spk.dust_value().as_sat(), 546);
                // 9 sats isn't enough to pay fee on a dummy transaction...
-               assert_eq!(tx.get_weight() as u64, 40); // ie 10 vbytes
+               assert_eq!(tx.weight() as u64, 40); // ie 10 vbytes
                assert!(maybe_add_change_output(&mut tx, 9, 0, 250, output_spk.clone()).is_err());
                assert_eq!(tx.wtxid(), orig_wtxid); // Failure doesn't change the transaction
                // but 10-564 is, just not enough to add a change output...
@@ -250,7 +251,7 @@ mod tests {
                assert_eq!(tx.output.len(), 1);
                assert_eq!(tx.output[0].value, 546);
                assert_eq!(tx.output[0].script_pubkey, output_spk);
-               assert_eq!(tx.get_weight() / 4, 590-546); // New weight is exactly the fee we wanted.
+               assert_eq!(tx.weight() / 4, 590-546); // New weight is exactly the fee we wanted.
 
                tx.output.pop();
                assert_eq!(tx.wtxid(), orig_wtxid); // The only change is the addition of one output.
@@ -260,12 +261,12 @@ mod tests {
        fn test_tx_extra_outputs() {
                // Check that we correctly handle existing outputs
                let mut tx = Transaction { version: 2, lock_time: 0, input: vec![TxIn {
-                       previous_output: OutPoint::new(Txid::from_hash(Sha256dHash::default()), 0), script_sig: Script::new(), witness: Vec::new(), sequence: 0,
+                       previous_output: OutPoint::new(Txid::from_hash(Sha256dHash::default()), 0), script_sig: Script::new(), witness: Witness::new(), sequence: 0,
                }], output: vec![TxOut {
                        script_pubkey: Builder::new().push_int(1).into_script(), value: 1000
                }] };
                let orig_wtxid = tx.wtxid();
-               let orig_weight = tx.get_weight();
+               let orig_weight = tx.weight();
                assert_eq!(orig_weight / 4, 61);
 
                assert_eq!(Builder::new().push_int(2).into_script().dust_value().as_sat(), 474);
@@ -284,7 +285,7 @@ mod tests {
                assert_eq!(tx.output.len(), 2);
                assert_eq!(tx.output[1].value, 474);
                assert_eq!(tx.output[1].script_pubkey, Builder::new().push_int(2).into_script());
-               assert_eq!(tx.get_weight() - orig_weight, 40); // Weight difference matches what we had to add above
+               assert_eq!(tx.weight() - orig_weight, 40); // Weight difference matches what we had to add above
                tx.output.pop();
                assert_eq!(tx.wtxid(), orig_wtxid); // The only change is the addition of one output.
        }