Merge pull request #889 from jkczyz/2021-04-electrum-trait
author Matt Corallo <649246+TheBlueMatt@users.noreply.github.com>
Fri, 23 Apr 2021 19:13:23 +0000 (19:13 +0000)
committer GitHub <noreply@github.com>
Fri, 23 Apr 2021 19:13:23 +0000 (19:13 +0000)
Define chain::Confirm trait for use by Electrum clients

22 files changed:
.github/workflows/build.yml
Cargo.toml
background-processor/Cargo.toml [deleted file]
background-processor/src/lib.rs [deleted file]
fuzz/src/bin/gen_target.sh
fuzz/src/bin/zbase32_target.rs [new file with mode: 0644]
fuzz/src/lib.rs
fuzz/src/router.rs
fuzz/src/zbase32.rs [new file with mode: 0644]
fuzz/targets.h
lightning-background-processor/Cargo.toml [new file with mode: 0644]
lightning-background-processor/src/lib.rs [new file with mode: 0644]
lightning-invoice/Cargo.toml
lightning-invoice/src/de.rs
lightning-invoice/src/lib.rs
lightning-invoice/src/ser.rs
lightning/src/chain/channelmonitor.rs
lightning/src/ln/peer_handler.rs
lightning/src/routing/router.rs
lightning/src/util/message_signing.rs [new file with mode: 0644]
lightning/src/util/mod.rs
lightning/src/util/zbase32.rs [new file with mode: 0644]

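For orientation: the trait named in the merge subject, `chain::Confirm`, lets Electrum-style clients feed Rust-Lightning individual confirmed transactions and chain-tip updates instead of whole connected blocks. It does not appear in the hunks reproduced in this excerpt, so the following is only a rough sketch of its shape; the names match the `lightning` crate, but treat the exact signatures as illustrative rather than authoritative.

use bitcoin::blockdata::block::BlockHeader;
use bitcoin::hash_types::Txid;
use lightning::chain::transaction::TransactionData;

/// Sketch of the confirmation-driven interface; ChannelManager and ChainMonitor
/// implement it so a client syncing via Electrum can drive them transaction by
/// transaction rather than block by block.
pub trait Confirm {
	/// Called when transactions relevant to the implementor confirm in the block
	/// with the given header, at the given height.
	fn transactions_confirmed(&self, header: &BlockHeader, txdata: &TransactionData, height: u32);
	/// Called when a previously confirmed transaction is reorganized out of the chain.
	fn transaction_unconfirmed(&self, txid: &Txid);
	/// Called whenever the best chain tip changes.
	fn best_block_updated(&self, header: &BlockHeader, height: u32);
	/// Returns the transactions the client must keep monitoring for reorganization.
	fn get_relevant_txids(&self) -> Vec<Txid>;
}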
diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index a379d1cd64ba73c4c07c57336f698ce245214724..d71a47da4aaa163af589afb56c205285c6934569 100644 (file)
@@ -47,7 +47,7 @@ jobs:
         run: RUSTFLAGS="-C link-dead-code" cargo build --verbose --color always
       - name: Build on Rust ${{ matrix.toolchain }}
         if: "! matrix.build-net-tokio"
-        run: cargo build --verbose  --color always -p lightning
+        run: cargo build --verbose  --color always -p lightning && cargo build --verbose  --color always -p lightning-invoice
       - name: Build Block Sync Clients on Rust ${{ matrix.toolchain }} with features
         if: "matrix.build-net-tokio && !matrix.coverage"
         run: |
@@ -74,7 +74,7 @@ jobs:
         run: RUSTFLAGS="-C link-dead-code" cargo test --verbose --color always
       - name: Test on Rust ${{ matrix.toolchain }}
         if: "! matrix.build-net-tokio"
-        run: cargo test --verbose --color always  -p lightning
+        run: cargo test --verbose --color always  -p lightning && cargo test --verbose --color always  -p lightning-invoice
       - name: Test Block Sync Clients on Rust ${{ matrix.toolchain }} with features
         if: "matrix.build-net-tokio && !matrix.coverage"
         run: |
diff --git a/Cargo.toml b/Cargo.toml
index e1807eab684e29e4a7bc6f0dd7839281379dfb65..597d4efbabb4e8cca00458db04ddfa2b3bb29c9c 100644 (file)
@@ -6,7 +6,7 @@ members = [
     "lightning-invoice",
     "lightning-net-tokio",
     "lightning-persister",
-    "background-processor",
+    "lightning-background-processor",
 ]
 
 # Our tests do actual crypto and lots of work, the tradeoff for -O1 is well worth it.
diff --git a/background-processor/Cargo.toml b/background-processor/Cargo.toml
deleted file mode 100644 (file)
index 89ac216..0000000
+++ /dev/null
@@ -1,22 +0,0 @@
-[package]
-name = "lightning-background-processor"
-version = "0.0.13"
-authors = ["Valentine Wallace <vwallace@protonmail.com>"]
-license = "MIT OR Apache-2.0"
-repository = "http://github.com/rust-bitcoin/rust-lightning"
-description = """
-Utilities to perform required background tasks for Rust Lightning.
-"""
-edition = "2018"
-
-[dependencies]
-bitcoin = "0.26"
-lightning = { version = "0.0.13", path = "../lightning", features = ["allow_wallclock_use"] }
-lightning-persister = { version = "0.0.13", path = "../lightning-persister" }
-
-[dev-dependencies]
-lightning = { version = "0.0.13", path = "../lightning", features = ["_test_utils"] }
-
-[dev-dependencies.bitcoin]
-version = "0.26"
-features = ["bitcoinconsensus"]
diff --git a/background-processor/src/lib.rs b/background-processor/src/lib.rs
deleted file mode 100644 (file)
index 30d9f6c..0000000
+++ /dev/null
@@ -1,335 +0,0 @@
-//! Utilities that take care of tasks that (1) need to happen periodically to keep Rust-Lightning
-//! running properly, and (2) either can or should be run in the background. See docs for
-//! [`BackgroundProcessor`] for more details on the nitty-gritty.
-
-#![deny(broken_intra_doc_links)]
-#![deny(missing_docs)]
-#![deny(unsafe_code)]
-
-#[macro_use] extern crate lightning;
-
-use lightning::chain;
-use lightning::chain::chaininterface::{BroadcasterInterface, FeeEstimator};
-use lightning::chain::keysinterface::{Sign, KeysInterface};
-use lightning::ln::channelmanager::ChannelManager;
-use lightning::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler};
-use lightning::ln::peer_handler::{PeerManager, SocketDescriptor};
-use lightning::util::logger::Logger;
-use std::sync::Arc;
-use std::sync::atomic::{AtomicBool, Ordering};
-use std::thread;
-use std::thread::JoinHandle;
-use std::time::{Duration, Instant};
-
-/// BackgroundProcessor takes care of tasks that (1) need to happen periodically to keep
-/// Rust-Lightning running properly, and (2) either can or should be run in the background. Its
-/// responsibilities are:
-/// * Monitoring whether the ChannelManager needs to be re-persisted to disk, and if so,
-///   writing it to disk/backups by invoking the callback given to it at startup.
-///   ChannelManager persistence should be done in the background.
-/// * Calling `ChannelManager::timer_tick_occurred()` and
-///   `PeerManager::timer_tick_occurred()` every minute (can be done in the
-///   background).
-///
-/// Note that if ChannelManager persistence fails and the persisted manager becomes out-of-date,
-/// then there is a risk of channels force-closing on startup when the manager realizes it's
-/// outdated. However, as long as `ChannelMonitor` backups are sound, no funds besides those used
-/// for unilateral chain closure fees are at risk.
-pub struct BackgroundProcessor {
-       stop_thread: Arc<AtomicBool>,
-       /// May be used to retrieve and handle the error if `BackgroundProcessor`'s thread
-       /// exits due to an error while persisting.
-       pub thread_handle: JoinHandle<Result<(), std::io::Error>>,
-}
-
-#[cfg(not(test))]
-const FRESHNESS_TIMER: u64 = 60;
-#[cfg(test)]
-const FRESHNESS_TIMER: u64 = 1;
-
-impl BackgroundProcessor {
-       /// Start a background thread that takes care of responsibilities enumerated in the top-level
-       /// documentation.
-       ///
-       /// If `persist_manager` returns an error, then this thread will return said error (and
-       /// `start()` will need to be called again to restart the `BackgroundProcessor`). Users should
-       /// wait on [`thread_handle`]'s `join()` method to be able to tell if and when an error is
-       /// returned, or implement `persist_manager` such that an error is never returned to the
-       /// `BackgroundProcessor`
-       ///
-       /// `persist_manager` is responsible for writing out the [`ChannelManager`] to disk, and/or
-       /// uploading to one or more backup services. See [`ChannelManager::write`] for writing out a
-       /// [`ChannelManager`]. See [`FilesystemPersister::persist_manager`] for Rust-Lightning's
-       /// provided implementation.
-       ///
-       /// [`thread_handle`]: BackgroundProcessor::thread_handle
-       /// [`ChannelManager`]: lightning::ln::channelmanager::ChannelManager
-       /// [`ChannelManager::write`]: lightning::ln::channelmanager::ChannelManager#impl-Writeable
-       /// [`FilesystemPersister::persist_manager`]: lightning_persister::FilesystemPersister::persist_manager
-       pub fn start<PM, Signer, M, T, K, F, L, Descriptor: 'static + SocketDescriptor + Send, CM, RM>(
-               persist_channel_manager: PM,
-               channel_manager: Arc<ChannelManager<Signer, Arc<M>, Arc<T>, Arc<K>, Arc<F>, Arc<L>>>,
-               peer_manager: Arc<PeerManager<Descriptor, Arc<CM>, Arc<RM>, Arc<L>>>, logger: Arc<L>,
-       ) -> Self
-       where
-               Signer: 'static + Sign,
-               M: 'static + chain::Watch<Signer>,
-               T: 'static + BroadcasterInterface,
-               K: 'static + KeysInterface<Signer = Signer>,
-               F: 'static + FeeEstimator,
-               L: 'static + Logger,
-               CM: 'static + ChannelMessageHandler,
-               RM: 'static + RoutingMessageHandler,
-               PM: 'static
-                       + Send
-                       + Fn(
-                               &ChannelManager<Signer, Arc<M>, Arc<T>, Arc<K>, Arc<F>, Arc<L>>,
-                       ) -> Result<(), std::io::Error>,
-       {
-               let stop_thread = Arc::new(AtomicBool::new(false));
-               let stop_thread_clone = stop_thread.clone();
-               let handle = thread::spawn(move || -> Result<(), std::io::Error> {
-                       let mut current_time = Instant::now();
-                       loop {
-                               peer_manager.process_events();
-                               let updates_available =
-                                       channel_manager.await_persistable_update_timeout(Duration::from_millis(100));
-                               if updates_available {
-                                       persist_channel_manager(&*channel_manager)?;
-                               }
-                               // Exit the loop if the background processor was requested to stop.
-                               if stop_thread.load(Ordering::Acquire) == true {
-                                       log_trace!(logger, "Terminating background processor.");
-                                       return Ok(());
-                               }
-                               if current_time.elapsed().as_secs() > FRESHNESS_TIMER {
-                                       log_trace!(logger, "Calling ChannelManager's and PeerManager's timer_tick_occurred");
-                                       channel_manager.timer_tick_occurred();
-                                       peer_manager.timer_tick_occurred();
-                                       current_time = Instant::now();
-                               }
-                       }
-               });
-               Self { stop_thread: stop_thread_clone, thread_handle: handle }
-       }
-
-       /// Stop `BackgroundProcessor`'s thread.
-       pub fn stop(self) -> Result<(), std::io::Error> {
-               self.stop_thread.store(true, Ordering::Release);
-               self.thread_handle.join().unwrap()
-       }
-}
-
-#[cfg(test)]
-mod tests {
-       use bitcoin::blockdata::constants::genesis_block;
-       use bitcoin::blockdata::transaction::{Transaction, TxOut};
-       use bitcoin::network::constants::Network;
-       use lightning::chain;
-       use lightning::chain::chaininterface::{BroadcasterInterface, FeeEstimator};
-       use lightning::chain::chainmonitor;
-       use lightning::chain::keysinterface::{Sign, InMemorySigner, KeysInterface, KeysManager};
-       use lightning::chain::transaction::OutPoint;
-       use lightning::get_event_msg;
-       use lightning::ln::channelmanager::{BestBlock, ChainParameters, ChannelManager, SimpleArcChannelManager};
-       use lightning::ln::features::InitFeatures;
-       use lightning::ln::msgs::ChannelMessageHandler;
-       use lightning::ln::peer_handler::{PeerManager, MessageHandler, SocketDescriptor};
-       use lightning::util::config::UserConfig;
-       use lightning::util::events::{Event, EventsProvider, MessageSendEventsProvider, MessageSendEvent};
-       use lightning::util::logger::Logger;
-       use lightning::util::ser::Writeable;
-       use lightning::util::test_utils;
-       use lightning_persister::FilesystemPersister;
-       use std::fs;
-       use std::path::PathBuf;
-       use std::sync::{Arc, Mutex};
-       use std::time::Duration;
-       use super::BackgroundProcessor;
-
-       #[derive(Clone, Eq, Hash, PartialEq)]
-       struct TestDescriptor{}
-       impl SocketDescriptor for TestDescriptor {
-               fn send_data(&mut self, _data: &[u8], _resume_read: bool) -> usize {
-                       0
-               }
-
-               fn disconnect_socket(&mut self) {}
-       }
-
-       type ChainMonitor = chainmonitor::ChainMonitor<InMemorySigner, Arc<test_utils::TestChainSource>, Arc<test_utils::TestBroadcaster>, Arc<test_utils::TestFeeEstimator>, Arc<test_utils::TestLogger>, Arc<FilesystemPersister>>;
-
-       struct Node {
-               node: Arc<SimpleArcChannelManager<ChainMonitor, test_utils::TestBroadcaster, test_utils::TestFeeEstimator, test_utils::TestLogger>>,
-               peer_manager: Arc<PeerManager<TestDescriptor, Arc<test_utils::TestChannelMessageHandler>, Arc<test_utils::TestRoutingMessageHandler>, Arc<test_utils::TestLogger>>>,
-               persister: Arc<FilesystemPersister>,
-               logger: Arc<test_utils::TestLogger>,
-       }
-
-       impl Drop for Node {
-               fn drop(&mut self) {
-                       let data_dir = self.persister.get_data_dir();
-                       match fs::remove_dir_all(data_dir.clone()) {
-                               Err(e) => println!("Failed to remove test persister directory {}: {}", data_dir, e),
-                               _ => {}
-                       }
-               }
-       }
-
-       fn get_full_filepath(filepath: String, filename: String) -> String {
-               let mut path = PathBuf::from(filepath);
-               path.push(filename);
-               path.to_str().unwrap().to_string()
-       }
-
-       fn create_nodes(num_nodes: usize, persist_dir: String) -> Vec<Node> {
-               let mut nodes = Vec::new();
-               for i in 0..num_nodes {
-                       let tx_broadcaster = Arc::new(test_utils::TestBroadcaster{txn_broadcasted: Mutex::new(Vec::new())});
-                       let fee_estimator = Arc::new(test_utils::TestFeeEstimator { sat_per_kw: 253 });
-                       let chain_source = Arc::new(test_utils::TestChainSource::new(Network::Testnet));
-                       let logger = Arc::new(test_utils::TestLogger::with_id(format!("node {}", i)));
-                       let persister = Arc::new(FilesystemPersister::new(format!("{}_persister_{}", persist_dir, i)));
-                       let seed = [i as u8; 32];
-                       let network = Network::Testnet;
-                       let now = Duration::from_secs(genesis_block(network).header.time as u64);
-                       let keys_manager = Arc::new(KeysManager::new(&seed, now.as_secs(), now.subsec_nanos()));
-                       let chain_monitor = Arc::new(chainmonitor::ChainMonitor::new(Some(chain_source.clone()), tx_broadcaster.clone(), logger.clone(), fee_estimator.clone(), persister.clone()));
-                       let params = ChainParameters {
-                               network,
-                               best_block: BestBlock::from_genesis(network),
-                       };
-                       let manager = Arc::new(ChannelManager::new(fee_estimator.clone(), chain_monitor.clone(), tx_broadcaster, logger.clone(), keys_manager.clone(), UserConfig::default(), params));
-                       let msg_handler = MessageHandler { chan_handler: Arc::new(test_utils::TestChannelMessageHandler::new()), route_handler: Arc::new(test_utils::TestRoutingMessageHandler::new() )};
-                       let peer_manager = Arc::new(PeerManager::new(msg_handler, keys_manager.get_node_secret(), &seed, logger.clone()));
-                       let node = Node { node: manager, peer_manager, persister, logger };
-                       nodes.push(node);
-               }
-               nodes
-       }
-
-       macro_rules! open_channel {
-               ($node_a: expr, $node_b: expr, $channel_value: expr) => {{
-                       $node_a.node.create_channel($node_b.node.get_our_node_id(), $channel_value, 100, 42, None).unwrap();
-                       $node_b.node.handle_open_channel(&$node_a.node.get_our_node_id(), InitFeatures::known(), &get_event_msg!($node_a, MessageSendEvent::SendOpenChannel, $node_b.node.get_our_node_id()));
-                       $node_a.node.handle_accept_channel(&$node_b.node.get_our_node_id(), InitFeatures::known(), &get_event_msg!($node_b, MessageSendEvent::SendAcceptChannel, $node_a.node.get_our_node_id()));
-                       let events = $node_a.node.get_and_clear_pending_events();
-                       assert_eq!(events.len(), 1);
-                       let (temporary_channel_id, tx) = match events[0] {
-                               Event::FundingGenerationReady { ref temporary_channel_id, ref channel_value_satoshis, ref output_script, user_channel_id } => {
-                                       assert_eq!(*channel_value_satoshis, $channel_value);
-                                       assert_eq!(user_channel_id, 42);
-
-                                       let tx = Transaction { version: 1 as i32, lock_time: 0, input: Vec::new(), output: vec![TxOut {
-                                               value: *channel_value_satoshis, script_pubkey: output_script.clone(),
-                                       }]};
-                                       (*temporary_channel_id, tx)
-                               },
-                               _ => panic!("Unexpected event"),
-                       };
-
-                       $node_a.node.funding_transaction_generated(&temporary_channel_id, tx.clone()).unwrap();
-                       $node_b.node.handle_funding_created(&$node_a.node.get_our_node_id(), &get_event_msg!($node_a, MessageSendEvent::SendFundingCreated, $node_b.node.get_our_node_id()));
-                       $node_a.node.handle_funding_signed(&$node_b.node.get_our_node_id(), &get_event_msg!($node_b, MessageSendEvent::SendFundingSigned, $node_a.node.get_our_node_id()));
-                       tx
-               }}
-       }
-
-       #[test]
-       fn test_background_processor() {
-               // Test that when a new channel is created, the ChannelManager needs to be re-persisted with
-               // updates. Also test that when new updates are available, the manager signals that it needs
-               // re-persistence and is successfully re-persisted.
-               let nodes = create_nodes(2, "test_background_processor".to_string());
-
-               // Initiate the background processors to watch each node.
-               let data_dir = nodes[0].persister.get_data_dir();
-               let callback = move |node: &ChannelManager<InMemorySigner, Arc<ChainMonitor>, Arc<test_utils::TestBroadcaster>, Arc<KeysManager>, Arc<test_utils::TestFeeEstimator>, Arc<test_utils::TestLogger>>| FilesystemPersister::persist_manager(data_dir.clone(), node);
-               let bg_processor = BackgroundProcessor::start(callback, nodes[0].node.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone());
-
-               // Go through the channel creation process until each node should have something persisted.
-               let tx = open_channel!(nodes[0], nodes[1], 100000);
-
-               macro_rules! check_persisted_data {
-                       ($node: expr, $filepath: expr, $expected_bytes: expr) => {
-                               match $node.write(&mut $expected_bytes) {
-                                       Ok(()) => {
-                                               loop {
-                                                       match std::fs::read($filepath) {
-                                                               Ok(bytes) => {
-                                                                       if bytes == $expected_bytes {
-                                                                               break
-                                                                       } else {
-                                                                               continue
-                                                                       }
-                                                               },
-                                                               Err(_) => continue
-                                                       }
-                                               }
-                                       },
-                                       Err(e) => panic!("Unexpected error: {}", e)
-                               }
-                       }
-               }
-
-               // Check that the initial channel manager data is persisted as expected.
-               let filepath = get_full_filepath("test_background_processor_persister_0".to_string(), "manager".to_string());
-               let mut expected_bytes = Vec::new();
-               check_persisted_data!(nodes[0].node, filepath.clone(), expected_bytes);
-               loop {
-                       if !nodes[0].node.get_persistence_condvar_value() { break }
-               }
-
-               // Force-close the channel.
-               nodes[0].node.force_close_channel(&OutPoint { txid: tx.txid(), index: 0 }.to_channel_id()).unwrap();
-
-               // Check that the force-close updates are persisted.
-               let mut expected_bytes = Vec::new();
-               check_persisted_data!(nodes[0].node, filepath.clone(), expected_bytes);
-               loop {
-                       if !nodes[0].node.get_persistence_condvar_value() { break }
-               }
-
-               assert!(bg_processor.stop().is_ok());
-       }
-
-       #[test]
-       fn test_timer_tick_called() {
-               // Test that ChannelManager's and PeerManager's `timer_tick_occurred` is called every
-               // `FRESHNESS_TIMER`.
-               let nodes = create_nodes(1, "test_timer_tick_called".to_string());
-               let data_dir = nodes[0].persister.get_data_dir();
-               let callback = move |node: &ChannelManager<InMemorySigner, Arc<ChainMonitor>, Arc<test_utils::TestBroadcaster>, Arc<KeysManager>, Arc<test_utils::TestFeeEstimator>, Arc<test_utils::TestLogger>>| FilesystemPersister::persist_manager(data_dir.clone(), node);
-               let bg_processor = BackgroundProcessor::start(callback, nodes[0].node.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone());
-               loop {
-                       let log_entries = nodes[0].logger.lines.lock().unwrap();
-                       let desired_log = "Calling ChannelManager's and PeerManager's timer_tick_occurred".to_string();
-                       if log_entries.get(&("lightning_background_processor".to_string(), desired_log)).is_some() {
-                               break
-                       }
-               }
-
-               assert!(bg_processor.stop().is_ok());
-       }
-
-       #[test]
-       fn test_persist_error() {
-               // Test that if we encounter an error during manager persistence, the thread panics.
-               fn persist_manager<Signer, M, T, K, F, L>(_data: &ChannelManager<Signer, Arc<M>, Arc<T>, Arc<K>, Arc<F>, Arc<L>>) -> Result<(), std::io::Error>
-               where Signer: 'static + Sign,
-                     M: 'static + chain::Watch<Signer>,
-                     T: 'static + BroadcasterInterface,
-                     K: 'static + KeysInterface<Signer=Signer>,
-                     F: 'static + FeeEstimator,
-                     L: 'static + Logger,
-               {
-                       Err(std::io::Error::new(std::io::ErrorKind::Other, "test"))
-               }
-
-               let nodes = create_nodes(2, "test_persist_error".to_string());
-               let bg_processor = BackgroundProcessor::start(persist_manager, nodes[0].node.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone());
-               open_channel!(nodes[0], nodes[1], 100000);
-
-               let _ = bg_processor.thread_handle.join().unwrap().expect_err("Errored persisting manager: test");
-       }
-}
diff --git a/fuzz/src/bin/gen_target.sh b/fuzz/src/bin/gen_target.sh
index 8cb52f6687bfc32b3615136a82a04bd644a4d176..eb07df6342f86dc6d99904953f376f414de5289f 100755 (executable)
@@ -11,6 +11,7 @@ GEN_TEST chanmon_consistency
 GEN_TEST full_stack
 GEN_TEST peer_crypt
 GEN_TEST router
+GEN_TEST zbase32
 
 GEN_TEST msg_accept_channel msg_targets::
 GEN_TEST msg_announcement_signatures msg_targets::
diff --git a/fuzz/src/bin/zbase32_target.rs b/fuzz/src/bin/zbase32_target.rs
new file mode 100644 (file)
index 0000000..ae96ce4
--- /dev/null
@@ -0,0 +1,102 @@
+// This file is Copyright its original authors, visible in version control
+// history.
+//
+// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
+// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
+// You may not use this file except in accordance with one or both of these
+// licenses.
+
+// This file is auto-generated by gen_target.sh based on target_template.txt
+// To modify it, modify target_template.txt and run gen_target.sh instead.
+
+#![cfg_attr(feature = "libfuzzer_fuzz", no_main)]
+
+extern crate lightning_fuzz;
+use lightning_fuzz::zbase32::*;
+
+#[cfg(feature = "afl")]
+#[macro_use] extern crate afl;
+#[cfg(feature = "afl")]
+fn main() {
+       fuzz!(|data| {
+               zbase32_run(data.as_ptr(), data.len());
+       });
+}
+
+#[cfg(feature = "honggfuzz")]
+#[macro_use] extern crate honggfuzz;
+#[cfg(feature = "honggfuzz")]
+fn main() {
+       loop {
+               fuzz!(|data| {
+                       zbase32_run(data.as_ptr(), data.len());
+               });
+       }
+}
+
+#[cfg(feature = "libfuzzer_fuzz")]
+#[macro_use] extern crate libfuzzer_sys;
+#[cfg(feature = "libfuzzer_fuzz")]
+fuzz_target!(|data: &[u8]| {
+       zbase32_run(data.as_ptr(), data.len());
+});
+
+#[cfg(feature = "stdin_fuzz")]
+fn main() {
+       use std::io::Read;
+
+       let mut data = Vec::with_capacity(8192);
+       std::io::stdin().read_to_end(&mut data).unwrap();
+       zbase32_run(data.as_ptr(), data.len());
+}
+
+#[test]
+fn run_test_cases() {
+       use std::fs;
+       use std::io::Read;
+       use lightning_fuzz::utils::test_logger::StringBuffer;
+
+       use std::sync::{atomic, Arc};
+       {
+               let data: Vec<u8> = vec![0];
+               zbase32_run(data.as_ptr(), data.len());
+       }
+       let mut threads = Vec::new();
+       let threads_running = Arc::new(atomic::AtomicUsize::new(0));
+       if let Ok(tests) = fs::read_dir("test_cases/zbase32") {
+               for test in tests {
+                       let mut data: Vec<u8> = Vec::new();
+                       let path = test.unwrap().path();
+                       fs::File::open(&path).unwrap().read_to_end(&mut data).unwrap();
+                       threads_running.fetch_add(1, atomic::Ordering::AcqRel);
+
+                       let thread_count_ref = Arc::clone(&threads_running);
+                       let main_thread_ref = std::thread::current();
+                       threads.push((path.file_name().unwrap().to_str().unwrap().to_string(),
+                               std::thread::spawn(move || {
+                                       let string_logger = StringBuffer::new();
+
+                                       let panic_logger = string_logger.clone();
+                                       let res = if ::std::panic::catch_unwind(move || {
+                                               zbase32_test(&data, panic_logger);
+                                       }).is_err() {
+                                               Some(string_logger.into_string())
+                                       } else { None };
+                                       thread_count_ref.fetch_sub(1, atomic::Ordering::AcqRel);
+                                       main_thread_ref.unpark();
+                                       res
+                               })
+                       ));
+                       while threads_running.load(atomic::Ordering::Acquire) > 32 {
+                               std::thread::park();
+                       }
+               }
+       }
+       for (test, thread) in threads.drain(..) {
+               if let Some(output) = thread.join().unwrap() {
+                       println!("Output of {}:\n{}", test, output);
+                       panic!();
+               }
+       }
+}
diff --git a/fuzz/src/lib.rs b/fuzz/src/lib.rs
index 5fe50520c57c70595f8e353339d53ecb8d4329b3..a0cc42b8189f9ae8837fb8ea64106c902185781d 100644 (file)
@@ -18,5 +18,6 @@ pub mod chanmon_consistency;
 pub mod full_stack;
 pub mod peer_crypt;
 pub mod router;
+pub mod zbase32;
 
 pub mod msg_targets;
diff --git a/fuzz/src/router.rs b/fuzz/src/router.rs
index fb720c9916c2d7b38ee653df9fea675f3f8ea23f..e80e080f31786bc76b40b10bc9b6fb96d27c8e2a 100644 (file)
@@ -15,7 +15,7 @@ use lightning::chain;
 use lightning::ln::channelmanager::ChannelDetails;
 use lightning::ln::features::InitFeatures;
 use lightning::ln::msgs;
-use lightning::routing::router::{get_route, RouteHint};
+use lightning::routing::router::{get_route, RouteHintHop};
 use lightning::util::logger::Logger;
 use lightning::util::ser::Readable;
 use lightning::routing::network_graph::{NetworkGraph, RoutingFees};
@@ -224,7 +224,7 @@ pub fn do_test<Out: test_logger::Output>(data: &[u8], out: Out) {
                                        for _ in 0..count {
                                                scid += 1;
                                                let rnid = node_pks.iter().skip(slice_to_be16(get_slice!(2))as usize % node_pks.len()).next().unwrap();
-                                               last_hops_vec.push(RouteHint {
+                                               last_hops_vec.push(RouteHintHop {
                                                        src_node_id: *rnid,
                                                        short_channel_id: scid,
                                                        fees: RoutingFees {
diff --git a/fuzz/src/zbase32.rs b/fuzz/src/zbase32.rs
new file mode 100644 (file)
index 0000000..305485b
--- /dev/null
@@ -0,0 +1,33 @@
+// This file is Copyright its original authors, visible in version control
+// history.
+//
+// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
+// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
+// You may not use this file except in accordance with one or both of these
+// licenses.
+
+use lightning::util::zbase32;
+
+use utils::test_logger;
+
+#[inline]
+pub fn do_test(data: &[u8]) {
+       let res = zbase32::encode(data);
+       assert_eq!(&zbase32::decode(&res).unwrap()[..], data);
+
+       if let Ok(s) = std::str::from_utf8(data) {
+               if let Ok(decoded) = zbase32::decode(s) {
+                       assert_eq!(&zbase32::encode(&decoded), &s.to_ascii_lowercase());
+               }
+       }
+}
+
+pub fn zbase32_test<Out: test_logger::Output>(data: &[u8], _out: Out) {
+       do_test(data);
+}
+
+#[no_mangle]
+pub extern "C" fn zbase32_run(data: *const u8, datalen: usize) {
+       do_test(unsafe { std::slice::from_raw_parts(data, datalen) });
+}
diff --git a/fuzz/targets.h b/fuzz/targets.h
index 7ca3e93f3633fdc2c02c99b028a16f9869352e5c..5d45e3d02388ea9a1659bf0cbb6e88e379d2f940 100644 (file)
@@ -4,6 +4,7 @@ void chanmon_consistency_run(const unsigned char* data, size_t data_len);
 void full_stack_run(const unsigned char* data, size_t data_len);
 void peer_crypt_run(const unsigned char* data, size_t data_len);
 void router_run(const unsigned char* data, size_t data_len);
+void zbase32_run(const unsigned char* data, size_t data_len);
 void msg_accept_channel_run(const unsigned char* data, size_t data_len);
 void msg_announcement_signatures_run(const unsigned char* data, size_t data_len);
 void msg_channel_reestablish_run(const unsigned char* data, size_t data_len);
diff --git a/lightning-background-processor/Cargo.toml b/lightning-background-processor/Cargo.toml
new file mode 100644 (file)
index 0000000..89ac216
--- /dev/null
@@ -0,0 +1,22 @@
+[package]
+name = "lightning-background-processor"
+version = "0.0.13"
+authors = ["Valentine Wallace <vwallace@protonmail.com>"]
+license = "MIT OR Apache-2.0"
+repository = "http://github.com/rust-bitcoin/rust-lightning"
+description = """
+Utilities to perform required background tasks for Rust Lightning.
+"""
+edition = "2018"
+
+[dependencies]
+bitcoin = "0.26"
+lightning = { version = "0.0.13", path = "../lightning", features = ["allow_wallclock_use"] }
+lightning-persister = { version = "0.0.13", path = "../lightning-persister" }
+
+[dev-dependencies]
+lightning = { version = "0.0.13", path = "../lightning", features = ["_test_utils"] }
+
+[dev-dependencies.bitcoin]
+version = "0.26"
+features = ["bitcoinconsensus"]
diff --git a/lightning-background-processor/src/lib.rs b/lightning-background-processor/src/lib.rs
new file mode 100644 (file)
index 0000000..de9aa28
--- /dev/null
@@ -0,0 +1,372 @@
+//! Utilities that take care of tasks that (1) need to happen periodically to keep Rust-Lightning
+//! running properly, and (2) either can or should be run in the background. See docs for
+//! [`BackgroundProcessor`] for more details on the nitty-gritty.
+
+#![deny(broken_intra_doc_links)]
+#![deny(missing_docs)]
+#![deny(unsafe_code)]
+
+#[macro_use] extern crate lightning;
+
+use lightning::chain;
+use lightning::chain::chaininterface::{BroadcasterInterface, FeeEstimator};
+use lightning::chain::keysinterface::{Sign, KeysInterface};
+use lightning::ln::channelmanager::ChannelManager;
+use lightning::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler};
+use lightning::ln::peer_handler::{PeerManager, SocketDescriptor};
+use lightning::util::logger::Logger;
+use std::sync::Arc;
+use std::sync::atomic::{AtomicBool, Ordering};
+use std::thread;
+use std::thread::JoinHandle;
+use std::time::{Duration, Instant};
+use std::ops::Deref;
+
+/// BackgroundProcessor takes care of tasks that (1) need to happen periodically to keep
+/// Rust-Lightning running properly, and (2) either can or should be run in the background. Its
+/// responsibilities are:
+/// * Monitoring whether the ChannelManager needs to be re-persisted to disk, and if so,
+///   writing it to disk/backups by invoking the callback given to it at startup.
+///   ChannelManager persistence should be done in the background.
+/// * Calling `ChannelManager::timer_tick_occurred()` and
+///   `PeerManager::timer_tick_occurred()` every minute (can be done in the
+///   background).
+///
+/// Note that if ChannelManager persistence fails and the persisted manager becomes out-of-date,
+/// then there is a risk of channels force-closing on startup when the manager realizes it's
+/// outdated. However, as long as `ChannelMonitor` backups are sound, no funds besides those used
+/// for unilateral chain closure fees are at risk.
+pub struct BackgroundProcessor {
+       stop_thread: Arc<AtomicBool>,
+       /// May be used to retrieve and handle the error if `BackgroundProcessor`'s thread
+       /// exits due to an error while persisting.
+       pub thread_handle: JoinHandle<Result<(), std::io::Error>>,
+}
+
+#[cfg(not(test))]
+const FRESHNESS_TIMER: u64 = 60;
+#[cfg(test)]
+const FRESHNESS_TIMER: u64 = 1;
+
+/// Trait which handles persisting a [`ChannelManager`] to disk.
+///
+/// [`ChannelManager`]: lightning::ln::channelmanager::ChannelManager
+pub trait ChannelManagerPersister<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
+where
+       M::Target: 'static + chain::Watch<Signer>,
+       T::Target: 'static + BroadcasterInterface,
+       K::Target: 'static + KeysInterface<Signer = Signer>,
+       F::Target: 'static + FeeEstimator,
+       L::Target: 'static + Logger,
+{
+       /// Persist the given [`ChannelManager`] to disk, returning an error if persistence failed
+       /// (which will cause the [`BackgroundProcessor`] which called this method to exit).
+       ///
+       /// [`ChannelManager`]: lightning::ln::channelmanager::ChannelManager
+       fn persist_manager(&self, channel_manager: &ChannelManager<Signer, M, T, K, F, L>) -> Result<(), std::io::Error>;
+}
+
+impl<Fun, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
+ChannelManagerPersister<Signer, M, T, K, F, L> for Fun where
+       M::Target: 'static + chain::Watch<Signer>,
+       T::Target: 'static + BroadcasterInterface,
+       K::Target: 'static + KeysInterface<Signer = Signer>,
+       F::Target: 'static + FeeEstimator,
+       L::Target: 'static + Logger,
+       Fun: Fn(&ChannelManager<Signer, M, T, K, F, L>) -> Result<(), std::io::Error>,
+{
+       fn persist_manager(&self, channel_manager: &ChannelManager<Signer, M, T, K, F, L>) -> Result<(), std::io::Error> {
+               self(channel_manager)
+       }
+}
+
+impl BackgroundProcessor {
+       /// Start a background thread that takes care of responsibilities enumerated in the top-level
+       /// documentation.
+       ///
+       /// If `persist_manager` returns an error, then this thread will return said error (and
+       /// `start()` will need to be called again to restart the `BackgroundProcessor`). Users should
+       /// wait on [`thread_handle`]'s `join()` method to be able to tell if and when an error is
+       /// returned, or implement `persist_manager` such that an error is never returned to the
+       /// `BackgroundProcessor`.
+       ///
+       /// `persist_manager` is responsible for writing out the [`ChannelManager`] to disk, and/or
+       /// uploading to one or more backup services. See [`ChannelManager::write`] for writing out a
+       /// [`ChannelManager`]. See [`FilesystemPersister::persist_manager`] for Rust-Lightning's
+       /// provided implementation.
+       ///
+       /// [`thread_handle`]: BackgroundProcessor::thread_handle
+       /// [`ChannelManager`]: lightning::ln::channelmanager::ChannelManager
+       /// [`ChannelManager::write`]: lightning::ln::channelmanager::ChannelManager#impl-Writeable
+       /// [`FilesystemPersister::persist_manager`]: lightning_persister::FilesystemPersister::persist_manager
+       pub fn start<
+               Signer: 'static + Sign,
+               M: 'static + Deref + Send + Sync,
+               T: 'static + Deref + Send + Sync,
+               K: 'static + Deref + Send + Sync,
+               F: 'static + Deref + Send + Sync,
+               L: 'static + Deref + Send + Sync,
+               Descriptor: 'static + SocketDescriptor + Send + Sync,
+               CMH: 'static + Deref + Send + Sync,
+               RMH: 'static + Deref + Send + Sync,
+               CMP: 'static + Send + ChannelManagerPersister<Signer, M, T, K, F, L>,
+               CM: 'static + Deref<Target = ChannelManager<Signer, M, T, K, F, L>> + Send + Sync,
+               PM: 'static + Deref<Target = PeerManager<Descriptor, CMH, RMH, L>> + Send + Sync,
+       >
+       (handler: CMP, channel_manager: CM, peer_manager: PM, logger: L) -> Self
+       where
+               M::Target: 'static + chain::Watch<Signer>,
+               T::Target: 'static + BroadcasterInterface,
+               K::Target: 'static + KeysInterface<Signer = Signer>,
+               F::Target: 'static + FeeEstimator,
+               L::Target: 'static + Logger,
+               CMH::Target: 'static + ChannelMessageHandler,
+               RMH::Target: 'static + RoutingMessageHandler,
+       {
+               let stop_thread = Arc::new(AtomicBool::new(false));
+               let stop_thread_clone = stop_thread.clone();
+               let handle = thread::spawn(move || -> Result<(), std::io::Error> {
+                       let mut current_time = Instant::now();
+                       loop {
+                               peer_manager.process_events();
+                               let updates_available =
+                                       channel_manager.await_persistable_update_timeout(Duration::from_millis(100));
+                               if updates_available {
+                                       handler.persist_manager(&*channel_manager)?;
+                               }
+                               // Exit the loop if the background processor was requested to stop.
+                               if stop_thread.load(Ordering::Acquire) == true {
+                                       log_trace!(logger, "Terminating background processor.");
+                                       return Ok(());
+                               }
+                               if current_time.elapsed().as_secs() > FRESHNESS_TIMER {
+                                       log_trace!(logger, "Calling ChannelManager's and PeerManager's timer_tick_occurred");
+                                       channel_manager.timer_tick_occurred();
+                                       peer_manager.timer_tick_occurred();
+                                       current_time = Instant::now();
+                               }
+                       }
+               });
+               Self { stop_thread: stop_thread_clone, thread_handle: handle }
+       }
+
+       /// Stop `BackgroundProcessor`'s thread.
+       pub fn stop(self) -> Result<(), std::io::Error> {
+               self.stop_thread.store(true, Ordering::Release);
+               self.thread_handle.join().unwrap()
+       }
+}
+
+#[cfg(test)]
+mod tests {
+       use bitcoin::blockdata::constants::genesis_block;
+       use bitcoin::blockdata::transaction::{Transaction, TxOut};
+       use bitcoin::network::constants::Network;
+       use lightning::chain;
+       use lightning::chain::chaininterface::{BroadcasterInterface, FeeEstimator};
+       use lightning::chain::chainmonitor;
+       use lightning::chain::keysinterface::{Sign, InMemorySigner, KeysInterface, KeysManager};
+       use lightning::chain::transaction::OutPoint;
+       use lightning::get_event_msg;
+       use lightning::ln::channelmanager::{BestBlock, ChainParameters, ChannelManager, SimpleArcChannelManager};
+       use lightning::ln::features::InitFeatures;
+       use lightning::ln::msgs::ChannelMessageHandler;
+       use lightning::ln::peer_handler::{PeerManager, MessageHandler, SocketDescriptor};
+       use lightning::util::config::UserConfig;
+       use lightning::util::events::{Event, EventsProvider, MessageSendEventsProvider, MessageSendEvent};
+       use lightning::util::logger::Logger;
+       use lightning::util::ser::Writeable;
+       use lightning::util::test_utils;
+       use lightning_persister::FilesystemPersister;
+       use std::fs;
+       use std::path::PathBuf;
+       use std::sync::{Arc, Mutex};
+       use std::time::Duration;
+       use super::BackgroundProcessor;
+
+       #[derive(Clone, Eq, Hash, PartialEq)]
+       struct TestDescriptor{}
+       impl SocketDescriptor for TestDescriptor {
+               fn send_data(&mut self, _data: &[u8], _resume_read: bool) -> usize {
+                       0
+               }
+
+               fn disconnect_socket(&mut self) {}
+       }
+
+       type ChainMonitor = chainmonitor::ChainMonitor<InMemorySigner, Arc<test_utils::TestChainSource>, Arc<test_utils::TestBroadcaster>, Arc<test_utils::TestFeeEstimator>, Arc<test_utils::TestLogger>, Arc<FilesystemPersister>>;
+
+       struct Node {
+               node: Arc<SimpleArcChannelManager<ChainMonitor, test_utils::TestBroadcaster, test_utils::TestFeeEstimator, test_utils::TestLogger>>,
+               peer_manager: Arc<PeerManager<TestDescriptor, Arc<test_utils::TestChannelMessageHandler>, Arc<test_utils::TestRoutingMessageHandler>, Arc<test_utils::TestLogger>>>,
+               persister: Arc<FilesystemPersister>,
+               logger: Arc<test_utils::TestLogger>,
+       }
+
+       impl Drop for Node {
+               fn drop(&mut self) {
+                       let data_dir = self.persister.get_data_dir();
+                       match fs::remove_dir_all(data_dir.clone()) {
+                               Err(e) => println!("Failed to remove test persister directory {}: {}", data_dir, e),
+                               _ => {}
+                       }
+               }
+       }
+
+       fn get_full_filepath(filepath: String, filename: String) -> String {
+               let mut path = PathBuf::from(filepath);
+               path.push(filename);
+               path.to_str().unwrap().to_string()
+       }
+
+       fn create_nodes(num_nodes: usize, persist_dir: String) -> Vec<Node> {
+               let mut nodes = Vec::new();
+               for i in 0..num_nodes {
+                       let tx_broadcaster = Arc::new(test_utils::TestBroadcaster{txn_broadcasted: Mutex::new(Vec::new())});
+                       let fee_estimator = Arc::new(test_utils::TestFeeEstimator { sat_per_kw: 253 });
+                       let chain_source = Arc::new(test_utils::TestChainSource::new(Network::Testnet));
+                       let logger = Arc::new(test_utils::TestLogger::with_id(format!("node {}", i)));
+                       let persister = Arc::new(FilesystemPersister::new(format!("{}_persister_{}", persist_dir, i)));
+                       let seed = [i as u8; 32];
+                       let network = Network::Testnet;
+                       let now = Duration::from_secs(genesis_block(network).header.time as u64);
+                       let keys_manager = Arc::new(KeysManager::new(&seed, now.as_secs(), now.subsec_nanos()));
+                       let chain_monitor = Arc::new(chainmonitor::ChainMonitor::new(Some(chain_source.clone()), tx_broadcaster.clone(), logger.clone(), fee_estimator.clone(), persister.clone()));
+                       let params = ChainParameters {
+                               network,
+                               best_block: BestBlock::from_genesis(network),
+                       };
+                       let manager = Arc::new(ChannelManager::new(fee_estimator.clone(), chain_monitor.clone(), tx_broadcaster, logger.clone(), keys_manager.clone(), UserConfig::default(), params));
+                       let msg_handler = MessageHandler { chan_handler: Arc::new(test_utils::TestChannelMessageHandler::new()), route_handler: Arc::new(test_utils::TestRoutingMessageHandler::new() )};
+                       let peer_manager = Arc::new(PeerManager::new(msg_handler, keys_manager.get_node_secret(), &seed, logger.clone()));
+                       let node = Node { node: manager, peer_manager, persister, logger };
+                       nodes.push(node);
+               }
+               nodes
+       }
+
+       macro_rules! open_channel {
+               ($node_a: expr, $node_b: expr, $channel_value: expr) => {{
+                       $node_a.node.create_channel($node_b.node.get_our_node_id(), $channel_value, 100, 42, None).unwrap();
+                       $node_b.node.handle_open_channel(&$node_a.node.get_our_node_id(), InitFeatures::known(), &get_event_msg!($node_a, MessageSendEvent::SendOpenChannel, $node_b.node.get_our_node_id()));
+                       $node_a.node.handle_accept_channel(&$node_b.node.get_our_node_id(), InitFeatures::known(), &get_event_msg!($node_b, MessageSendEvent::SendAcceptChannel, $node_a.node.get_our_node_id()));
+                       let events = $node_a.node.get_and_clear_pending_events();
+                       assert_eq!(events.len(), 1);
+                       let (temporary_channel_id, tx) = match events[0] {
+                               Event::FundingGenerationReady { ref temporary_channel_id, ref channel_value_satoshis, ref output_script, user_channel_id } => {
+                                       assert_eq!(*channel_value_satoshis, $channel_value);
+                                       assert_eq!(user_channel_id, 42);
+
+                                       let tx = Transaction { version: 1 as i32, lock_time: 0, input: Vec::new(), output: vec![TxOut {
+                                               value: *channel_value_satoshis, script_pubkey: output_script.clone(),
+                                       }]};
+                                       (*temporary_channel_id, tx)
+                               },
+                               _ => panic!("Unexpected event"),
+                       };
+
+                       $node_a.node.funding_transaction_generated(&temporary_channel_id, tx.clone()).unwrap();
+                       $node_b.node.handle_funding_created(&$node_a.node.get_our_node_id(), &get_event_msg!($node_a, MessageSendEvent::SendFundingCreated, $node_b.node.get_our_node_id()));
+                       $node_a.node.handle_funding_signed(&$node_b.node.get_our_node_id(), &get_event_msg!($node_b, MessageSendEvent::SendFundingSigned, $node_a.node.get_our_node_id()));
+                       tx
+               }}
+       }
+
+       #[test]
+       fn test_background_processor() {
+               // Test that when a new channel is created, the ChannelManager needs to be re-persisted with
+               // updates. Also test that when new updates are available, the manager signals that it needs
+               // re-persistence and is successfully re-persisted.
+               let nodes = create_nodes(2, "test_background_processor".to_string());
+
+               // Initiate the background processors to watch each node.
+               let data_dir = nodes[0].persister.get_data_dir();
+               let callback = move |node: &ChannelManager<InMemorySigner, Arc<ChainMonitor>, Arc<test_utils::TestBroadcaster>, Arc<KeysManager>, Arc<test_utils::TestFeeEstimator>, Arc<test_utils::TestLogger>>| FilesystemPersister::persist_manager(data_dir.clone(), node);
+               let bg_processor = BackgroundProcessor::start(callback, nodes[0].node.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone());
+
+               // Go through the channel creation process until each node should have something persisted.
+               let tx = open_channel!(nodes[0], nodes[1], 100000);
+
+               macro_rules! check_persisted_data {
+                       ($node: expr, $filepath: expr, $expected_bytes: expr) => {
+                               match $node.write(&mut $expected_bytes) {
+                                       Ok(()) => {
+                                               loop {
+                                                       match std::fs::read($filepath) {
+                                                               Ok(bytes) => {
+                                                                       if bytes == $expected_bytes {
+                                                                               break
+                                                                       } else {
+                                                                               continue
+                                                                       }
+                                                               },
+                                                               Err(_) => continue
+                                                       }
+                                               }
+                                       },
+                                       Err(e) => panic!("Unexpected error: {}", e)
+                               }
+                       }
+               }
+
+               // Check that the initial channel manager data is persisted as expected.
+               let filepath = get_full_filepath("test_background_processor_persister_0".to_string(), "manager".to_string());
+               let mut expected_bytes = Vec::new();
+               check_persisted_data!(nodes[0].node, filepath.clone(), expected_bytes);
+               loop {
+                       if !nodes[0].node.get_persistence_condvar_value() { break }
+               }
+
+               // Force-close the channel.
+               nodes[0].node.force_close_channel(&OutPoint { txid: tx.txid(), index: 0 }.to_channel_id()).unwrap();
+
+               // Check that the force-close updates are persisted.
+               let mut expected_bytes = Vec::new();
+               check_persisted_data!(nodes[0].node, filepath.clone(), expected_bytes);
+               loop {
+                       if !nodes[0].node.get_persistence_condvar_value() { break }
+               }
+
+               assert!(bg_processor.stop().is_ok());
+       }
+
+       #[test]
+       fn test_timer_tick_called() {
+               // Test that ChannelManager's and PeerManager's `timer_tick_occurred` is called every
+               // `FRESHNESS_TIMER`.
+               let nodes = create_nodes(1, "test_timer_tick_called".to_string());
+               let data_dir = nodes[0].persister.get_data_dir();
+               let callback = move |node: &ChannelManager<InMemorySigner, Arc<ChainMonitor>, Arc<test_utils::TestBroadcaster>, Arc<KeysManager>, Arc<test_utils::TestFeeEstimator>, Arc<test_utils::TestLogger>>| FilesystemPersister::persist_manager(data_dir.clone(), node);
+               let bg_processor = BackgroundProcessor::start(callback, nodes[0].node.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone());
+               loop {
+                       let log_entries = nodes[0].logger.lines.lock().unwrap();
+                       let desired_log = "Calling ChannelManager's and PeerManager's timer_tick_occurred".to_string();
+                       if log_entries.get(&("lightning_background_processor".to_string(), desired_log)).is_some() {
+                               break
+                       }
+               }
+
+               assert!(bg_processor.stop().is_ok());
+       }
+
+       #[test]
+       fn test_persist_error() {
+               // Test that if we encounter an error during manager persistence, the thread panics.
+               fn persist_manager<Signer, M, T, K, F, L>(_data: &ChannelManager<Signer, Arc<M>, Arc<T>, Arc<K>, Arc<F>, Arc<L>>) -> Result<(), std::io::Error>
+               where Signer: 'static + Sign,
+                     M: 'static + chain::Watch<Signer>,
+                     T: 'static + BroadcasterInterface,
+                     K: 'static + KeysInterface<Signer=Signer>,
+                     F: 'static + FeeEstimator,
+                     L: 'static + Logger,
+               {
+                       Err(std::io::Error::new(std::io::ErrorKind::Other, "test"))
+               }
+
+               let nodes = create_nodes(2, "test_persist_error".to_string());
+               let bg_processor = BackgroundProcessor::start(persist_manager, nodes[0].node.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone());
+               open_channel!(nodes[0], nodes[1], 100000);
+
+               let _ = bg_processor.thread_handle.join().unwrap().expect_err("Errored persisting manager: test");
+       }
+}
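Relative to the deleted crate, the renamed crate's `start` is also more flexible: it accepts any `Deref` to the `ChannelManager` and `PeerManager` rather than requiring `Arc`s, and persistence goes through the `ChannelManagerPersister` trait, which the blanket impl above already provides for plain closures. For setups that want more than a closure, the trait can be implemented on a dedicated type. The sketch below assumes a hypothetical `BackupClient` with an `upload` method; everything else follows the bounds shown in the diff, and the serialization call mirrors the `Writeable` usage in the tests.

use lightning::chain;
use lightning::chain::chaininterface::{BroadcasterInterface, FeeEstimator};
use lightning::chain::keysinterface::{KeysInterface, Sign};
use lightning::ln::channelmanager::ChannelManager;
use lightning::util::logger::Logger;
use lightning::util::ser::Writeable;
use lightning_background_processor::ChannelManagerPersister;
use std::ops::Deref;

/// Hypothetical transport standing in for whatever backup service a node uses.
struct BackupClient;
impl BackupClient {
	fn upload(&self, _name: &str, _bytes: &[u8]) -> Result<(), std::io::Error> { Ok(()) }
}

/// Persister that uploads the serialized ChannelManager instead of writing it locally.
struct RemoteBackupPersister { client: BackupClient }

impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
	ChannelManagerPersister<Signer, M, T, K, F, L> for RemoteBackupPersister
where
	M::Target: 'static + chain::Watch<Signer>,
	T::Target: 'static + BroadcasterInterface,
	K::Target: 'static + KeysInterface<Signer = Signer>,
	F::Target: 'static + FeeEstimator,
	L::Target: 'static + Logger,
{
	fn persist_manager(&self, manager: &ChannelManager<Signer, M, T, K, F, L>) -> Result<(), std::io::Error> {
		// Serialize the manager via its Writeable impl, then hand the bytes to the backup.
		let mut bytes = Vec::new();
		manager.write(&mut bytes)?;
		self.client.upload("channel_manager", &bytes)
	}
}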
diff --git a/lightning-invoice/Cargo.toml b/lightning-invoice/Cargo.toml
index 47bf9d744dcb91f554a69400521e439759e03f66..758578ca465b5f6cf57e29440e5439002c0ced64 100644 (file)
@@ -10,6 +10,7 @@ readme = "README.md"
 
 [dependencies]
 bech32 = "0.7"
+lightning = { version = "0.0.13", path = "../lightning" }
 secp256k1 = { version = "0.20", features = ["recovery"] }
 num-traits = "0.2.8"
 bitcoin_hashes = "0.9.4"
diff --git a/lightning-invoice/src/de.rs b/lightning-invoice/src/de.rs
index d6cb92072bc69fc56815c4ebd713f0ba1d29b562..fe77a93a99b03f9707563f56ea556c56d5ac7c76 100644 (file)
@@ -10,6 +10,8 @@ use bech32::{u5, FromBase32};
 
 use bitcoin_hashes::Hash;
 use bitcoin_hashes::sha256;
+use lightning::routing::network_graph::RoutingFees;
+use lightning::routing::router::RouteHintHop;
 
 use num_traits::{CheckedAdd, CheckedMul};
 
@@ -353,7 +355,7 @@ impl FromBase32 for Signature {
        }
 }
 
-fn parse_int_be<T, U>(digits: &[U], base: T) -> Option<T>
+pub(crate) fn parse_int_be<T, U>(digits: &[U], base: T) -> Option<T>
        where T: CheckedAdd + CheckedMul + From<u8> + Default,
              U: Into<u8> + Copy
 {
@@ -428,7 +430,7 @@ impl FromBase32 for TaggedField {
                        constants::TAG_FALLBACK =>
                                Ok(TaggedField::Fallback(Fallback::from_base32(field_data)?)),
                        constants::TAG_ROUTE =>
-                               Ok(TaggedField::Route(Route::from_base32(field_data)?)),
+                               Ok(TaggedField::Route(RouteHint::from_base32(field_data)?)),
                        constants::TAG_PAYMENT_SECRET =>
                                Ok(TaggedField::PaymentSecret(PaymentSecret::from_base32(field_data)?)),
                        _ => {
@@ -565,17 +567,17 @@ impl FromBase32 for Fallback {
        }
 }
 
-impl FromBase32 for Route {
+impl FromBase32 for RouteHint {
        type Err = ParseError;
 
-       fn from_base32(field_data: &[u5]) -> Result<Route, ParseError> {
+       fn from_base32(field_data: &[u5]) -> Result<RouteHint, ParseError> {
                let bytes = Vec::<u8>::from_base32(field_data)?;
 
                if bytes.len() % 51 != 0 {
                        return Err(ParseError::UnexpectedEndOfTaggedFields);
                }
 
-               let mut route_hops = Vec::<RouteHop>::new();
+               let mut route_hops = Vec::<RouteHintHop>::new();
 
                let mut bytes = bytes.as_slice();
                while !bytes.is_empty() {
@@ -585,18 +587,22 @@ impl FromBase32 for Route {
                        let mut channel_id: [u8; 8] = Default::default();
                        channel_id.copy_from_slice(&hop_bytes[33..41]);
 
-                       let hop = RouteHop {
-                               pubkey: PublicKey::from_slice(&hop_bytes[0..33])?,
-                               short_channel_id: channel_id,
-                               fee_base_msat: parse_int_be(&hop_bytes[41..45], 256).expect("slice too big?"),
-                               fee_proportional_millionths: parse_int_be(&hop_bytes[45..49], 256).expect("slice too big?"),
-                               cltv_expiry_delta: parse_int_be(&hop_bytes[49..51], 256).expect("slice too big?")
+                       let hop = RouteHintHop {
+                               src_node_id: PublicKey::from_slice(&hop_bytes[0..33])?,
+                               short_channel_id: parse_int_be(&channel_id, 256).expect("short chan ID slice too big?"),
+                               fees: RoutingFees {
+                                       base_msat: parse_int_be(&hop_bytes[41..45], 256).expect("slice too big?"),
+                                       proportional_millionths: parse_int_be(&hop_bytes[45..49], 256).expect("slice too big?"),
+                               },
+                               cltv_expiry_delta: parse_int_be(&hop_bytes[49..51], 256).expect("slice too big?"),
+                               htlc_minimum_msat: None,
+                               htlc_maximum_msat: None,
                        };
 
                        route_hops.push(hop);
                }
 
-               Ok(Route(route_hops))
+               Ok(RouteHint(route_hops))
        }
 }
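For readers following the new decoding path, the fixed 51-byte per-hop layout consumed by the loop above is:

    // bytes  0..33  src_node_id                   (compressed secp256k1 public key)
    // bytes 33..41  short_channel_id              (big-endian u64, via parse_int_be)
    // bytes 41..45  fees.base_msat                (big-endian u32)
    // bytes 45..49  fees.proportional_millionths  (big-endian u32)
    // bytes 49..51  cltv_expiry_delta             (big-endian u16)
    // htlc_minimum_msat / htlc_maximum_msat have no on-the-wire representation here and are
    // always set to None.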
 
@@ -931,47 +937,57 @@ mod test {
 
        #[test]
        fn test_parse_route() {
-               use RouteHop;
-               use ::Route;
+               use lightning::routing::network_graph::RoutingFees;
+               use lightning::routing::router::RouteHintHop;
+               use ::RouteHint;
                use bech32::FromBase32;
+               use de::parse_int_be;
 
                let input = from_bech32(
                        "q20q82gphp2nflc7jtzrcazrra7wwgzxqc8u7754cdlpfrmccae92qgzqvzq2ps8pqqqqqqpqqqqq9qqqvpeuqa\
                        fqxu92d8lr6fvg0r5gv0heeeqgcrqlnm6jhphu9y00rrhy4grqszsvpcgpy9qqqqqqgqqqqq7qqzq".as_bytes()
                );
 
-               let mut expected = Vec::<RouteHop>::new();
-               expected.push(RouteHop {
-                       pubkey: PublicKey::from_slice(
+               let mut expected = Vec::<RouteHintHop>::new();
+               expected.push(RouteHintHop {
+                       src_node_id: PublicKey::from_slice(
                                &[
                                        0x02u8, 0x9e, 0x03, 0xa9, 0x01, 0xb8, 0x55, 0x34, 0xff, 0x1e, 0x92, 0xc4, 0x3c,
                                        0x74, 0x43, 0x1f, 0x7c, 0xe7, 0x20, 0x46, 0x06, 0x0f, 0xcf, 0x7a, 0x95, 0xc3,
                                        0x7e, 0x14, 0x8f, 0x78, 0xc7, 0x72, 0x55
                                ][..]
                        ).unwrap(),
-                       short_channel_id: [0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08],
-                       fee_base_msat: 1,
-                       fee_proportional_millionths: 20,
-                       cltv_expiry_delta: 3
+                       short_channel_id: parse_int_be(&[0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08], 256).expect("short chan ID slice too big?"),
+                       fees: RoutingFees {
+                               base_msat: 1,
+                               proportional_millionths: 20,
+                       },
+                       cltv_expiry_delta: 3,
+                       htlc_minimum_msat: None,
+                       htlc_maximum_msat: None
                });
-               expected.push(RouteHop {
-                       pubkey: PublicKey::from_slice(
+               expected.push(RouteHintHop {
+                       src_node_id: PublicKey::from_slice(
                                &[
                                        0x03u8, 0x9e, 0x03, 0xa9, 0x01, 0xb8, 0x55, 0x34, 0xff, 0x1e, 0x92, 0xc4, 0x3c,
                                        0x74, 0x43, 0x1f, 0x7c, 0xe7, 0x20, 0x46, 0x06, 0x0f, 0xcf, 0x7a, 0x95, 0xc3,
                                        0x7e, 0x14, 0x8f, 0x78, 0xc7, 0x72, 0x55
                                ][..]
                        ).unwrap(),
-                       short_channel_id: [0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a],
-                       fee_base_msat: 2,
-                       fee_proportional_millionths: 30,
-                       cltv_expiry_delta: 4
+                       short_channel_id: parse_int_be(&[0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a], 256).expect("short chan ID slice too big?"),
+                       fees: RoutingFees {
+                               base_msat: 2,
+                               proportional_millionths: 30,
+                       },
+                       cltv_expiry_delta: 4,
+                       htlc_minimum_msat: None,
+                       htlc_maximum_msat: None
                });
 
-               assert_eq!(Route::from_base32(&input), Ok(Route(expected)));
+               assert_eq!(RouteHint::from_base32(&input), Ok(RouteHint(expected)));
 
                assert_eq!(
-                       Route::from_base32(&[u5::try_from_u8(0).unwrap(); 40][..]),
+                       RouteHint::from_base32(&[u5::try_from_u8(0).unwrap(); 40][..]),
                        Err(ParseError::UnexpectedEndOfTaggedFields)
                );
        }
index e9ca442f2b2006ea5ff343a0d4785ce7dc945899..2ce022248d0ad2479247bb4b9e2d29ff7d0451ca 100644 (file)
 
 extern crate bech32;
 extern crate bitcoin_hashes;
+extern crate lightning;
 extern crate num_traits;
 extern crate secp256k1;
 
 use bech32::u5;
 use bitcoin_hashes::Hash;
 use bitcoin_hashes::sha256;
+#[cfg(any(doc, test))]
+use lightning::routing::network_graph::RoutingFees;
+use lightning::routing::router::RouteHintHop;
 
 use secp256k1::key::PublicKey;
 use secp256k1::{Message, Secp256k1};
@@ -323,7 +327,7 @@ pub enum TaggedField {
        ExpiryTime(ExpiryTime),
        MinFinalCltvExpiry(MinFinalCltvExpiry),
        Fallback(Fallback),
-       Route(Route),
+       Route(RouteHint),
        PaymentSecret(PaymentSecret),
 }
 
@@ -383,26 +387,7 @@ pub struct Signature(pub RecoverableSignature);
 /// The encoded route has to be <1024 5bit characters long (<=639 bytes or <=12 hops)
 ///
 #[derive(Eq, PartialEq, Debug, Clone)]
-pub struct Route(Vec<RouteHop>);
-
-/// Node on a private route
-#[derive(Eq, PartialEq, Debug, Clone)]
-pub struct RouteHop {
-       /// Node's public key
-       pub pubkey: PublicKey,
-
-       /// Which channel of this node we would be using
-       pub short_channel_id: [u8; 8],
-
-       /// Fee charged by this node per transaction
-       pub fee_base_msat: u32,
-
-       /// Fee charged by this node proportional to the amount routed
-       pub fee_proportional_millionths: u32,
-
-       /// Delta substracted by this node from incoming cltv_expiry value
-       pub cltv_expiry_delta: u16,
-}
+pub struct RouteHint(Vec<RouteHintHop>);
 
 /// Tag constants as specified in BOLT11
 #[allow(missing_docs)]
@@ -499,8 +484,8 @@ impl<D: tb::Bool, H: tb::Bool, T: tb::Bool> InvoiceBuilder<D, H, T> {
        }
 
        /// Adds a private route.
-       pub fn route(mut self, route: Vec<RouteHop>) -> Self {
-               match Route::new(route) {
+       pub fn route(mut self, route: Vec<RouteHintHop>) -> Self {
+               match RouteHint::new(route) {
                        Ok(r) => self.tagged_fields.push(TaggedField::Route(r)),
                        Err(e) => self.error = Some(e),
                }
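Builder call sites now hand over the router's hop type directly; a hedged sketch (the public key and the numeric values are purely illustrative):

    let hop = RouteHintHop {
        src_node_id: some_public_key, // assumed to be a PublicKey already in scope
        short_channel_id: 0x0102030405060708,
        fees: RoutingFees { base_msat: 1, proportional_millionths: 20 },
        cltv_expiry_delta: 144,
        htlc_minimum_msat: None,
        htlc_maximum_msat: None,
    };
    // More than 12 hops in a single hint still yields CreationError::RouteTooLong.
    let builder = builder.route(vec![hop]);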
@@ -832,11 +817,11 @@ impl RawInvoice {
                }).collect::<Vec<&Fallback>>()
        }
 
-       pub fn routes(&self) -> Vec<&Route> {
+       pub fn routes(&self) -> Vec<&RouteHint> {
                self.known_tagged_fields().filter_map(|tf| match tf {
                        &TaggedField::Route(ref r) => Some(r),
                        _ => None,
-               }).collect::<Vec<&Route>>()
+               }).collect::<Vec<&RouteHint>>()
        }
 
        pub fn amount_pico_btc(&self) -> Option<u64> {
@@ -1035,7 +1020,7 @@ impl Invoice {
        }
 
        /// Returns a list of all routes included in the invoice
-       pub fn routes(&self) -> Vec<&Route> {
+       pub fn routes(&self) -> Vec<&RouteHint> {
                self.signed_invoice.routes()
        }
 
@@ -1157,32 +1142,32 @@ impl ExpiryTime {
        }
 }
 
-impl Route {
+impl RouteHint {
        /// Create a new (partial) route from a list of hops
-       pub fn new(hops: Vec<RouteHop>) -> Result<Route, CreationError> {
+       pub fn new(hops: Vec<RouteHintHop>) -> Result<RouteHint, CreationError> {
                if hops.len() <= 12 {
-                       Ok(Route(hops))
+                       Ok(RouteHint(hops))
                } else {
                        Err(CreationError::RouteTooLong)
                }
        }
 
        /// Return the underlying vector of hops
-       pub fn into_inner(self) -> Vec<RouteHop> {
+       pub fn into_inner(self) -> Vec<RouteHintHop> {
                self.0
        }
 }
 
-impl Into<Vec<RouteHop>> for Route {
-       fn into(self) -> Vec<RouteHop> {
+impl Into<Vec<RouteHintHop>> for RouteHint {
+       fn into(self) -> Vec<RouteHintHop> {
                self.into_inner()
        }
 }
 
-impl Deref for Route {
-       type Target = Vec<RouteHop>;
+impl Deref for RouteHint {
+       type Target = Vec<RouteHintHop>;
 
-       fn deref(&self) -> &Vec<RouteHop> {
+       fn deref(&self) -> &Vec<RouteHintHop> {
                &self.0
        }
 }
@@ -1458,18 +1443,22 @@ mod test {
                        .build_raw();
                assert_eq!(long_desc_res, Err(CreationError::DescriptionTooLong));
 
-               let route_hop = RouteHop {
-                       pubkey: PublicKey::from_slice(
+               let route_hop = RouteHintHop {
+                       src_node_id: PublicKey::from_slice(
                                        &[
                                                0x03, 0x9e, 0x03, 0xa9, 0x01, 0xb8, 0x55, 0x34, 0xff, 0x1e, 0x92, 0xc4,
                                                0x3c, 0x74, 0x43, 0x1f, 0x7c, 0xe7, 0x20, 0x46, 0x06, 0x0f, 0xcf, 0x7a,
                                                0x95, 0xc3, 0x7e, 0x14, 0x8f, 0x78, 0xc7, 0x72, 0x55
                                        ][..]
                                ).unwrap(),
-                       short_channel_id: [0; 8],
-                       fee_base_msat: 0,
-                       fee_proportional_millionths: 0,
+                       short_channel_id: 0,
+                       fees: RoutingFees {
+                               base_msat: 0,
+                               proportional_millionths: 0,
+                       },
                        cltv_expiry_delta: 0,
+                       htlc_minimum_msat: None,
+                       htlc_maximum_msat: None,
                };
                let too_long_route = vec![route_hop; 13];
                let long_route_res = builder.clone()
@@ -1505,36 +1494,52 @@ mod test {
                let public_key = PublicKey::from_secret_key(&secp_ctx, &private_key);
 
                let route_1 = vec![
-                       RouteHop {
-                               pubkey: public_key.clone(),
-                               short_channel_id: [123; 8],
-                               fee_base_msat: 2,
-                               fee_proportional_millionths: 1,
+                       RouteHintHop {
+                               src_node_id: public_key.clone(),
+                               short_channel_id: de::parse_int_be(&[123; 8], 256).expect("short chan ID slice too big?"),
+                               fees: RoutingFees {
+                                       base_msat: 2,
+                                       proportional_millionths: 1,
+                               },
                                cltv_expiry_delta: 145,
+                               htlc_minimum_msat: None,
+                               htlc_maximum_msat: None,
                        },
-                       RouteHop {
-                               pubkey: public_key.clone(),
-                               short_channel_id: [42; 8],
-                               fee_base_msat: 3,
-                               fee_proportional_millionths: 2,
+                       RouteHintHop {
+                               src_node_id: public_key.clone(),
+                               short_channel_id: de::parse_int_be(&[42; 8], 256).expect("short chan ID slice too big?"),
+                               fees: RoutingFees {
+                                       base_msat: 3,
+                                       proportional_millionths: 2,
+                               },
                                cltv_expiry_delta: 146,
+                               htlc_minimum_msat: None,
+                               htlc_maximum_msat: None,
                        }
                ];
 
                let route_2 = vec![
-                       RouteHop {
-                               pubkey: public_key.clone(),
-                               short_channel_id: [0; 8],
-                               fee_base_msat: 4,
-                               fee_proportional_millionths: 3,
+                       RouteHintHop {
+                               src_node_id: public_key.clone(),
+                               short_channel_id: 0,
+                               fees: RoutingFees {
+                                       base_msat: 4,
+                                       proportional_millionths: 3,
+                               },
                                cltv_expiry_delta: 147,
+                               htlc_minimum_msat: None,
+                               htlc_maximum_msat: None,
                        },
-                       RouteHop {
-                               pubkey: public_key.clone(),
-                               short_channel_id: [1; 8],
-                               fee_base_msat: 5,
-                               fee_proportional_millionths: 4,
+                       RouteHintHop {
+                               src_node_id: public_key.clone(),
+                               short_channel_id: de::parse_int_be(&[1; 8], 256).expect("short chan ID slice too big?"),
+                               fees: RoutingFees {
+                                       base_msat: 5,
+                                       proportional_millionths: 4,
+                               },
                                cltv_expiry_delta: 148,
+                               htlc_minimum_msat: None,
+                               htlc_maximum_msat: None,
                        }
                ];
 
@@ -1568,7 +1573,7 @@ mod test {
                assert_eq!(invoice.expiry_time(), Duration::from_secs(54321));
                assert_eq!(invoice.min_final_cltv_expiry(), Some(&144));
                assert_eq!(invoice.fallbacks(), vec![&Fallback::PubKeyHash([0;20])]);
-               assert_eq!(invoice.routes(), vec![&Route(route_1), &Route(route_2)]);
+               assert_eq!(invoice.routes(), vec![&RouteHint(route_1), &RouteHint(route_2)]);
                assert_eq!(
                        invoice.description(),
                        InvoiceDescription::Hash(&Sha256(sha256::Hash::from_slice(&[3;32][..]).unwrap()))
index 2b4332f8633bea9c16fd943de8844d99b5b69e68..83888e8269da1af358cd8cbf851d6e8b2c0e06da 100644 (file)
@@ -365,22 +365,26 @@ impl Base32Len for Fallback {
        }
 }
 
-impl ToBase32 for Route {
+impl ToBase32 for RouteHint {
        fn write_base32<W: WriteBase32>(&self, writer: &mut W) -> Result<(), <W as WriteBase32>::Err> {
                let mut converter = BytesToBase32::new(writer);
 
                for hop in self.iter() {
-                       converter.append(&hop.pubkey.serialize()[..])?;
-                       converter.append(&hop.short_channel_id[..])?;
+                       converter.append(&hop.src_node_id.serialize()[..])?;
+                       let short_channel_id = try_stretch(
+                               encode_int_be_base256(hop.short_channel_id),
+                               8
+                       ).expect("sizeof(u64) == 8");
+                       converter.append(&short_channel_id)?;
 
                        let fee_base_msat = try_stretch(
-                               encode_int_be_base256(hop.fee_base_msat),
+                               encode_int_be_base256(hop.fees.base_msat),
                                4
                        ).expect("sizeof(u32) == 4");
                        converter.append(&fee_base_msat)?;
 
                        let fee_proportional_millionths = try_stretch(
-                               encode_int_be_base256(hop.fee_proportional_millionths),
+                               encode_int_be_base256(hop.fees.proportional_millionths),
                                4
                        ).expect("sizeof(u32) == 4");
                        converter.append(&fee_proportional_millionths)?;
@@ -397,7 +401,7 @@ impl ToBase32 for Route {
        }
 }
 
-impl Base32Len for Route {
+impl Base32Len for RouteHint {
        fn base32_len(&self) -> usize {
                bytes_size_to_base32_size(self.0.len() * 51)
        }
index d6fd023b4de759396e8ae7e8fa127cee4c35f2dd..f0b23c08619632c4ab14d45f5322d51cc017c38d 100644 (file)
@@ -746,7 +746,7 @@ pub(crate) struct ChannelMonitorImpl<Signer: Sign> {
 }
 
 /// Transaction outputs to watch for on-chain spends.
-pub(super) type TransactionOutputs = (Txid, Vec<(u32, TxOut)>);
+pub type TransactionOutputs = (Txid, Vec<(u32, TxOut)>);
 
 #[cfg(any(test, feature = "fuzztarget", feature = "_test_utils"))]
 /// Used only in testing and fuzztarget to check serialization roundtrips don't change the
index 9488a34db0d7e04d322b787bc47501be5cba0c80..d4eb9eae5e7910f7a0b4de52ab6591794210f8ea 100644 (file)
@@ -1020,9 +1020,9 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, L: Deref> PeerManager<D
                        // buffer by doing things like announcing channels on another node. We should be willing to
                        // drop optional-ish messages when send buffers get full!
 
+                       let mut peers_lock = self.peers.lock().unwrap();
                        let mut events_generated = self.message_handler.chan_handler.get_and_clear_pending_msg_events();
                        events_generated.append(&mut self.message_handler.route_handler.get_and_clear_pending_msg_events());
-                       let mut peers_lock = self.peers.lock().unwrap();
                        let peers = &mut *peers_lock;
                        for event in events_generated.drain(..) {
                                macro_rules! get_peer_for_forwarding {
index 0ad307cdf2f17bf1a347b535ebf5009e9a000c14..08fe95d2305293e7440a04bc407a4110b87c3901 100644 (file)
@@ -117,8 +117,8 @@ impl Readable for Route {
 }
 
 /// A channel descriptor which provides a last-hop route to get_route
-#[derive(Clone)]
-pub struct RouteHint {
+#[derive(Eq, PartialEq, Debug, Clone)]
+pub struct RouteHintHop {
        /// The node_id of the non-target end of the route
        pub src_node_id: PublicKey,
        /// The short_channel_id of this channel
@@ -176,7 +176,7 @@ struct DummyDirectionalChannelInfo {
 /// These fee values are useful to choose hops as we traverse the graph "payee-to-payer".
 #[derive(Clone)]
 struct PathBuildingHop<'a> {
-       // The RouteHint fields which will eventually be used if this hop is used in a final Route.
+       // The RouteHintHop fields which will eventually be used if this hop is used in a final Route.
        // Note that node_features is calculated separately after our initial graph walk.
        pubkey: PublicKey,
        short_channel_id: u64,
@@ -353,7 +353,7 @@ fn compute_fees(amount_msat: u64, channel_fees: RoutingFees) -> Option<u64> {
 /// equal), however the enabled/disabled bit on such channels as well as the
 /// htlc_minimum_msat/htlc_maximum_msat *are* checked as they may change based on the receiving node.
 pub fn get_route<L: Deref>(our_node_id: &PublicKey, network: &NetworkGraph, payee: &PublicKey, payee_features: Option<InvoiceFeatures>, first_hops: Option<&[&ChannelDetails]>,
-       last_hops: &[&RouteHint], final_value_msat: u64, final_cltv: u32, logger: L) -> Result<Route, LightningError> where L::Target: Logger {
+       last_hops: &[&RouteHintHop], final_value_msat: u64, final_cltv: u32, logger: L) -> Result<Route, LightningError> where L::Target: Logger {
        // TODO: Obviously *only* using total fee cost sucks. We should consider weighting by
        // uptime/success in using a node in the past.
        if *payee == *our_node_id {
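Callers only see the parameter type rename; a hedged call-site sketch (the graph, keys, and logger are assumed to already exist in the caller, and last_hops is the test helper defined further down):

    let hints: Vec<RouteHintHop> = last_hops(&node_pubkeys);
    let hint_refs: Vec<&RouteHintHop> = hints.iter().collect();
    // Propagates LightningError on failure.
    let route = get_route(&our_node_id, &network_graph, &payee_pubkey, None, None,
        &hint_refs, 100_000, 42, logger)?;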
@@ -1163,7 +1163,7 @@ pub fn get_route<L: Deref>(our_node_id: &PublicKey, network: &NetworkGraph, paye
 
 #[cfg(test)]
 mod tests {
-       use routing::router::{get_route, RouteHint, RoutingFees};
+       use routing::router::{get_route, RouteHintHop, RoutingFees};
        use routing::network_graph::{NetworkGraph, NetGraphMsgHandler};
        use ln::features::{ChannelFeatures, InitFeatures, InvoiceFeatures, NodeFeatures};
        use ln::msgs::{ErrorAction, LightningError, OptionalField, UnsignedChannelAnnouncement, ChannelAnnouncement, RoutingMessageHandler,
@@ -2084,19 +2084,19 @@ mod tests {
                assert_eq!(route.paths[0][1].channel_features.le_flags(), &id_to_feature_flags(13));
        }
 
-       fn last_hops(nodes: &Vec<PublicKey>) -> Vec<RouteHint> {
+       fn last_hops(nodes: &Vec<PublicKey>) -> Vec<RouteHintHop> {
                let zero_fees = RoutingFees {
                        base_msat: 0,
                        proportional_millionths: 0,
                };
-               vec!(RouteHint {
+               vec!(RouteHintHop {
                        src_node_id: nodes[3].clone(),
                        short_channel_id: 8,
                        fees: zero_fees,
                        cltv_expiry_delta: (8 << 8) | 1,
                        htlc_minimum_msat: None,
                        htlc_maximum_msat: None,
-               }, RouteHint {
+               }, RouteHintHop {
                        src_node_id: nodes[4].clone(),
                        short_channel_id: 9,
                        fees: RoutingFees {
@@ -2106,7 +2106,7 @@ mod tests {
                        cltv_expiry_delta: (9 << 8) | 1,
                        htlc_minimum_msat: None,
                        htlc_maximum_msat: None,
-               }, RouteHint {
+               }, RouteHintHop {
                        src_node_id: nodes[5].clone(),
                        short_channel_id: 10,
                        fees: zero_fees,
@@ -2124,7 +2124,7 @@ mod tests {
                // Simple test across 2, 3, 5, and 4 via a last_hop channel
 
                // First check that the last hop can't have its source as the payee.
-               let invalid_last_hop = RouteHint {
+               let invalid_last_hop = RouteHintHop {
                        src_node_id: nodes[6],
                        short_channel_id: 8,
                        fees: RoutingFees {
@@ -2309,7 +2309,7 @@ mod tests {
                let target_node_id = PublicKey::from_secret_key(&Secp256k1::new(), &SecretKey::from_slice(&hex::decode(format!("{:02}", 43).repeat(32)).unwrap()[..]).unwrap());
 
                // If we specify a channel to a middle hop, that overrides our local channel view and that gets used
-               let last_hops = vec![RouteHint {
+               let last_hops = vec![RouteHintHop {
                        src_node_id: middle_node_id,
                        short_channel_id: 8,
                        fees: RoutingFees {
diff --git a/lightning/src/util/message_signing.rs b/lightning/src/util/message_signing.rs
new file mode 100644 (file)
index 0000000..b73ba2b
--- /dev/null
@@ -0,0 +1,140 @@
+// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
+// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
+// You may not use this file except in accordance with one or both of these
+// licenses.
+
+//! Lightning message signing and verification lives here. These tools can be used to sign messages using the node's
+//! secret so receivers are sure that they come from you. You can also use this to verify that a given message comes
+//! from a specific node.
+//! Furthermore, these tools can be used to sign / verify messages using ephemeral keys not tied to a node's identity.
+//!
+//! Note this is not part of the specs, but follows lnd's signing and verification protocol, which is defined as follows:
+//!
+//! signature = zbase32(SigRec(sha256d("Lightning Signed Message:" + msg)))
+//! zbase32 from https://philzimmermann.com/docs/human-oriented-base-32-encoding.txt
+//! SigRec has first byte 31 + recovery id, followed by 64 byte sig.
+//!
+//! This implementation is compatible with both lnd and c-lightning.
+//!
+//! https://lightning.readthedocs.io/lightning-signmessage.7.html
+//! https://api.lightning.community/#signmessage
+
+use crate::util::zbase32;
+use bitcoin::hashes::{sha256d, Hash};
+use bitcoin::secp256k1::recovery::{RecoverableSignature, RecoveryId};
+use bitcoin::secp256k1::{Error, Message, PublicKey, Secp256k1, SecretKey};
+
+static LN_MESSAGE_PREFIX: &[u8] = b"Lightning Signed Message:";
+
+fn sigrec_encode(sig_rec: RecoverableSignature) -> Vec<u8> {
+    let (rid, rsig) = sig_rec.serialize_compact();
+    let prefix = rid.to_i32() as u8 + 31;
+
+    [&[prefix], &rsig[..]].concat()
+}
+
+fn sigrec_decode(sig_rec: Vec<u8>) -> Result<RecoverableSignature, Error> {
+    let rsig = &sig_rec[1..];
+    let rid = sig_rec[0] as i32 - 31;
+
+    match RecoveryId::from_i32(rid) {
+        Ok(x) => RecoverableSignature::from_compact(rsig, x),
+        Err(e) => Err(e)
+    }
+}
+
+/// Creates a digital signature of a message given a SecretKey, like the node's secret.
+/// A receiver knowing the PublicKey (e.g. the node's id) and the message can be sure that the signature was generated by the caller.
+/// Signatures are EC recoverable, meaning that given the message and the signature the PublicKey of the signer can be extracted.
+pub fn sign(msg: &[u8], sk: SecretKey) -> Result<String, Error> {
+    let secp_ctx = Secp256k1::signing_only();
+    let msg_hash = sha256d::Hash::hash(&[LN_MESSAGE_PREFIX, msg].concat());
+
+    let sig = secp_ctx.sign_recoverable(&Message::from_slice(&msg_hash)?, &sk);
+    Ok(zbase32::encode(&sigrec_encode(sig)))
+}
+
+/// Recovers the PublicKey of the signer of the message given the message and the signature.
+pub fn recover_pk(msg: &[u8], sig: &str) ->  Result<PublicKey, Error> {
+    let secp_ctx = Secp256k1::verification_only();
+    let msg_hash = sha256d::Hash::hash(&[LN_MESSAGE_PREFIX, msg].concat());
+
+    match zbase32::decode(&sig) {
+        Ok(sig_rec) => {
+            match sigrec_decode(sig_rec) {
+                Ok(sig) => secp_ctx.recover(&Message::from_slice(&msg_hash)?, &sig),
+                Err(e) => Err(e)
+            }
+        },
+        Err(_) => Err(Error::InvalidSignature)
+    }
+}
+
+/// Verifies a message was signed by a PrivateKey that derives to a given PublicKey, given a message, a signature,
+/// and the PublicKey.
+pub fn verify(msg: &[u8], sig: &str, pk: PublicKey) -> bool {
+    match recover_pk(msg, sig) {
+        Ok(x) => x == pk,
+        Err(_) => false
+    }
+}
+
+#[cfg(test)]
+mod test {
+    use std::str::FromStr;
+    use util::message_signing::{sign, recover_pk, verify};
+    use bitcoin::secp256k1::key::ONE_KEY;
+    use bitcoin::secp256k1::{PublicKey, Secp256k1};
+
+    #[test]
+    fn test_sign() {
+        let message = "test message";
+        let zbase32_sig = sign(message.as_bytes(), ONE_KEY);
+
+        assert_eq!(zbase32_sig.unwrap(), "d9tibmnic9t5y41hg7hkakdcra94akas9ku3rmmj4ag9mritc8ok4p5qzefs78c9pqfhpuftqqzhydbdwfg7u6w6wdxcqpqn4sj4e73e")
+    }
+
+    #[test]
+    fn test_recover_pk() {
+        let message = "test message";
+        let sig = "d9tibmnic9t5y41hg7hkakdcra94akas9ku3rmmj4ag9mritc8ok4p5qzefs78c9pqfhpuftqqzhydbdwfg7u6w6wdxcqpqn4sj4e73e";
+        let pk = recover_pk(message.as_bytes(), sig);
+
+        assert_eq!(pk.unwrap(), PublicKey::from_secret_key(&Secp256k1::signing_only(), &ONE_KEY))
+    }
+
+    #[test]
+    fn test_verify() {
+        let message = "another message";
+        let sig = sign(message.as_bytes(), ONE_KEY).unwrap();
+        let pk = PublicKey::from_secret_key(&Secp256k1::signing_only(), &ONE_KEY);
+
+        assert!(verify(message.as_bytes(), &sig, pk))
+    }
+
+    #[test]
+    fn test_verify_ground_truth_ish() {
+        // There are no standard test vectors for sign/verify, so we use the same test vectors as c-lightning to check that the implementations are compatible.
+        // Taken from https://github.com/ElementsProject/lightning/blob/1275af6fbb02460c8eb2f00990bb0ef9179ce8f3/tests/test_misc.py#L1925-L1938
+
+        let corpus = [
+            ["@bitconner",
+             "is this compatible?",
+             "rbgfioj114mh48d8egqx8o9qxqw4fmhe8jbeeabdioxnjk8z3t1ma1hu1fiswpakgucwwzwo6ofycffbsqusqdimugbh41n1g698hr9t",
+             "02b80cabdf82638aac86948e4c06e82064f547768dcef977677b9ea931ea75bab5"],
+            ["@duck1123",
+             "hi",
+             "rnrphcjswusbacjnmmmrynh9pqip7sy5cx695h6mfu64iac6qmcmsd8xnsyczwmpqp9shqkth3h4jmkgyqu5z47jfn1q7gpxtaqpx4xg",
+             "02de60d194e1ca5947b59fe8e2efd6aadeabfb67f2e89e13ae1a799c1e08e4a43b"],
+            ["@jochemin",
+             "hi",
+             "ry8bbsopmduhxy3dr5d9ekfeabdpimfx95kagdem7914wtca79jwamtbw4rxh69hg7n6x9ty8cqk33knbxaqftgxsfsaeprxkn1k48p3",
+             "022b8ece90ee891cbcdac0c1cc6af46b73c47212d8defbce80265ac81a6b794931"],
+        ];
+
+        for c in &corpus {
+            assert!(verify(c[1].as_bytes(), c[2], PublicKey::from_str(c[3]).unwrap()))
+        }
+    }
+}
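The module is exported as lightning::util::message_signing (see the util/mod.rs hunk below), so downstream code can use it roughly as follows (the keys are assumed to be in scope; any SecretKey/PublicKey pair works):

    use lightning::util::message_signing::{sign, recover_pk, verify};

    let sig = sign(b"hello", my_secret_key)?;            // zbase32 string, lnd/c-lightning compatible
    assert!(verify(b"hello", &sig, my_public_key));      // true iff my_public_key's key signed it
    let signer = recover_pk(b"hello", &sig)?;            // or recover the signing key directly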
index 04b77872c89011a4b6491019ed252da826e5d363..e8118a9a1a9de962af52a5f171b3d79506f4c168 100644 (file)
@@ -15,9 +15,14 @@ pub(crate) mod fuzz_wrappers;
 pub mod events;
 pub mod errors;
 pub mod ser;
+pub mod message_signing;
 
 pub(crate) mod byte_utils;
 pub(crate) mod chacha20;
+#[cfg(feature = "fuzztarget")]
+pub mod zbase32;
+#[cfg(not(feature = "fuzztarget"))]
+pub(crate) mod zbase32;
 #[cfg(not(feature = "fuzztarget"))]
 pub(crate) mod poly1305;
 pub(crate) mod chacha20poly1305rfc;
diff --git a/lightning/src/util/zbase32.rs b/lightning/src/util/zbase32.rs
new file mode 100644 (file)
index 0000000..e17ac3e
--- /dev/null
@@ -0,0 +1,142 @@
+// This is a modification of base32 encoding to support the zbase32 alphabet.
+// The original piece of software can be found at https://github.com/andreasots/base32
+// The original portions of this software are Copyright (c) 2015 The base32 Developers
+
+/* This file is licensed under either of
+ *  Apache License, Version 2.0, (LICENSE-APACHE or http://www.apache.org/licenses/LICENSE-2.0) or
+ *  MIT license (LICENSE-MIT or http://opensource.org/licenses/MIT)
+ * at your option.
+*/
+
+const ALPHABET: &'static [u8] = b"ybndrfg8ejkmcpqxot1uwisza345h769";
+
+/// Encodes some bytes as a zbase32 string
+pub fn encode(data: &[u8]) -> String {
+       let mut ret = Vec::with_capacity((data.len() + 4) / 5 * 8);
+
+       for chunk in data.chunks(5) {
+               let buf = {
+                       let mut buf = [0u8; 5];
+                       for (i, &b) in chunk.iter().enumerate() {
+                               buf[i] = b;
+                       }
+                       buf
+               };
+
+               ret.push(ALPHABET[((buf[0] & 0xF8) >> 3) as usize]);
+               ret.push(ALPHABET[(((buf[0] & 0x07) << 2) | ((buf[1] & 0xC0) >> 6)) as usize]);
+               ret.push(ALPHABET[((buf[1] & 0x3E) >> 1) as usize]);
+               ret.push(ALPHABET[(((buf[1] & 0x01) << 4) | ((buf[2] & 0xF0) >> 4)) as usize]);
+               ret.push(ALPHABET[(((buf[2] & 0x0F) << 1) | (buf[3] >> 7)) as usize]);
+               ret.push(ALPHABET[((buf[3] & 0x7C) >> 2) as usize]);
+               ret.push(ALPHABET[(((buf[3] & 0x03) << 3) | ((buf[4] & 0xE0) >> 5)) as usize]);
+               ret.push(ALPHABET[(buf[4] & 0x1F) as usize]);
+       }
+
+       ret.truncate((data.len() * 8 + 4) / 5);
+
+       // Check that our capacity calculation doesn't under-shoot in fuzzing
+       #[cfg(fuzzing)]
+       assert_eq!(ret.capacity(), (data.len() + 4) / 5 * 8);
+
+       String::from_utf8(ret).unwrap()
+}
+
+// ASCII 0-Z
+const INV_ALPHABET: [i8; 43] = [
+       -1, 18, -1, 25, 26, 27, 30, 29, 7, 31, -1, -1, -1, -1, -1, -1, -1,  24, 1, 12, 3, 8, 5, 6, 28,
+       21, 9, 10, -1, 11, 2, 16, 13, 14, 4, 22, 17, 19, -1, 20, 15, 0, 23,
+];
+
+/// Decodes a zbase32 string to the original bytes, failing if the string was not encoded by a
+/// proper zbase32 encoder.
+pub fn decode(data: &str) -> Result<Vec<u8>, ()> {
+       if !data.is_ascii() {
+               return Err(());
+       }
+
+       let data = data.as_bytes();
+       let output_length = data.len() * 5 / 8;
+       if data.len() > (output_length * 8 + 4) / 5 {
+               // If the string has more characters than are required to encode the number of bytes
+               // decodable, treat the string as invalid.
+               return Err(());
+       }
+
+       let mut ret = Vec::with_capacity((data.len() + 7) / 8 * 5);
+
+       for chunk in data.chunks(8) {
+               let buf = {
+                       let mut buf = [0u8; 8];
+                       for (i, &c) in chunk.iter().enumerate() {
+                               match INV_ALPHABET.get(c.to_ascii_uppercase().wrapping_sub(b'0') as usize) {
+                                       Some(&-1) | None => return Err(()),
+                                       Some(&value) => buf[i] = value as u8,
+                               };
+                       }
+                       buf
+               };
+               ret.push((buf[0] << 3) | (buf[1] >> 2));
+               ret.push((buf[1] << 6) | (buf[2] << 1) | (buf[3] >> 4));
+               ret.push((buf[3] << 4) | (buf[4] >> 1));
+               ret.push((buf[4] << 7) | (buf[5] << 2) | (buf[6] >> 3));
+               ret.push((buf[6] << 5) | buf[7]);
+       }
+       for c in ret.drain(output_length..) {
+               if c != 0 {
+                       // If the original string had any bits set at positions outside of the encoded data,
+                       // treat the string as invalid.
+                       return Err(());
+               }
+       }
+
+       // Check that our capacity calculation doesn't under-shoot in fuzzing
+       #[cfg(fuzzing)]
+       assert_eq!(ret.capacity(), (data.len() + 7) / 8 * 5);
+
+       Ok(ret)
+}
+
+#[cfg(test)]
+mod tests {
+       use super::*;
+
+       const TEST_DATA: &[(&str, &[u8])] = &[
+               ("",       &[]),
+               ("yy",   &[0x00]),
+               ("oy",   &[0x80]),
+               ("tqrey",   &[0x8b, 0x88, 0x80]),
+               ("6n9hq",  &[0xf0, 0xbf, 0xc7]),
+               ("4t7ye",  &[0xd4, 0x7a, 0x04]),
+               ("6im5sdy", &[0xf5, 0x57, 0xbb, 0x0c]),
+               ("ybndrfg8ejkmcpqxot1uwisza345h769", &[0x00, 0x44, 0x32, 0x14, 0xc7, 0x42, 0x54, 0xb6,
+                                                                                                       0x35, 0xcf, 0x84, 0x65, 0x3a, 0x56, 0xd7, 0xc6,
+                                                                                                       0x75, 0xbe, 0x77, 0xdf])
+       ];
+
+       #[test]
+       fn test_encode() {
+               for &(zbase32, data) in TEST_DATA {
+                       assert_eq!(encode(data), zbase32);
+               }
+       }
+
+       #[test]
+       fn test_decode() {
+               for &(zbase32, data) in TEST_DATA {
+                       assert_eq!(decode(zbase32).unwrap(), data);
+               }
+       }
+
+       #[test]
+       fn test_decode_wrong() {
+               const WRONG_DATA: &[&str] = &["00", "l1", "?", "="];
+
+               for &data in WRONG_DATA {
+                       match decode(data) {
+                               Ok(_) => assert!(false, "Data shouldn't be decodable"),
+                               Err(_) => assert!(true),
+                       }
+               }
+       }
+}
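As a quick sanity check on the size math above, using entries from the TEST_DATA table:

    // 3 input bytes -> (3 * 8 + 4) / 5 = 5 output characters, so the single 8-character
    // chunk produced by the encoding loop is truncated to "6n9hq".
    assert_eq!(encode(&[0xf0, 0xbf, 0xc7]), "6n9hq");
    assert_eq!(decode("6n9hq").unwrap(), vec![0xf0, 0xbf, 0xc7]);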