Merge pull request #1434 from TheBlueMatt/2022-04-robust-payment-claims
author    valentinewallace <valentinewallace@users.noreply.github.com>
          Mon, 30 May 2022 17:05:01 +0000 (10:05 -0700)
committer GitHub <noreply@github.com>
          Mon, 30 May 2022 17:05:01 +0000 (10:05 -0700)
Improve Robustness of Inbound MPP Claims Across Restart

44 files changed:
.editorconfig
.github/workflows/build.yml
Cargo.toml
fuzz/Cargo.toml
fuzz/src/bin/gen_target.sh
fuzz/src/bin/process_network_graph_target.rs [new file with mode: 0644]
fuzz/src/lib.rs
fuzz/src/process_network_graph.rs [new file with mode: 0644]
fuzz/src/router.rs
fuzz/targets.h
lightning-invoice/src/lib.rs
lightning-invoice/src/payment.rs
lightning-rapid-gossip-sync/Cargo.toml [new file with mode: 0644]
lightning-rapid-gossip-sync/README.md [new file with mode: 0644]
lightning-rapid-gossip-sync/res/.gitignore [new file with mode: 0644]
lightning-rapid-gossip-sync/src/error.rs [new file with mode: 0644]
lightning-rapid-gossip-sync/src/lib.rs [new file with mode: 0644]
lightning-rapid-gossip-sync/src/processing.rs [new file with mode: 0644]
lightning/src/chain/channelmonitor.rs
lightning/src/chain/mod.rs
lightning/src/lib.rs
lightning/src/ln/chan_utils.rs
lightning/src/ln/chanmon_update_fail_tests.rs
lightning/src/ln/channel.rs
lightning/src/ln/channelmanager.rs
lightning/src/ln/features.rs
lightning/src/ln/functional_test_utils.rs
lightning/src/ln/functional_tests.rs
lightning/src/ln/monitor_tests.rs
lightning/src/ln/msgs.rs
lightning/src/ln/payment_tests.rs
lightning/src/ln/peer_channel_encryptor.rs
lightning/src/ln/priv_short_conf_tests.rs
lightning/src/ln/reorg_tests.rs
lightning/src/ln/script.rs
lightning/src/ln/wire.rs
lightning/src/routing/network_graph.rs
lightning/src/routing/router.rs
lightning/src/routing/scoring.rs
lightning/src/util/config.rs
lightning/src/util/events.rs
lightning/src/util/scid_utils.rs
lightning/src/util/ser.rs
lightning/src/util/test_utils.rs

index e5657670c10706dfbc274144d4e9289ba265ef62..dab24fe3dd37f2aedbbe45c70f826eecda517bcb 100644 (file)
@@ -3,3 +3,4 @@
 [*]
 indent_style = tab
 insert_final_newline = true
+trim_trailing_whitespace = true
index cff1713440035a0b76d89a9e6c88904c97c0b87e..59a11b4e4a11efc7788b51fe04ad664ca15da7f9 100644 (file)
@@ -141,6 +141,7 @@ jobs:
         run: |
           cargo test --verbose --color always  -p lightning
           cargo test --verbose --color always  -p lightning-invoice
+          cargo test --verbose --color always  -p lightning-rapid-gossip-sync
           cargo build --verbose  --color always -p lightning-persister
           cargo build --verbose  --color always -p lightning-background-processor
       - name: Test C Bindings Modifications on Rust ${{ matrix.toolchain }}
@@ -221,11 +222,24 @@ jobs:
       - name: Fetch routing graph snapshot
         if: steps.cache-graph.outputs.cache-hit != 'true'
         run: |
-          wget -O lightning/net_graph-2021-05-31.bin https://bitcoin.ninja/ldk-net_graph-v0.0.15-2021-05-31.bin
-          if [ "$(sha256sum lightning/net_graph-2021-05-31.bin | awk '{ print $1 }')" != "05a5361278f68ee2afd086cc04a1f927a63924be451f3221d380533acfacc303" ]; then
+          curl --verbose -L -o lightning/net_graph-2021-05-31.bin https://bitcoin.ninja/ldk-net_graph-v0.0.15-2021-05-31.bin
+          echo "Sha sum: $(sha256sum lightning/net_graph-2021-05-31.bin | awk '{ print $1 }')"
+          if [ "$(sha256sum lightning/net_graph-2021-05-31.bin | awk '{ print $1 }')" != "${EXPECTED_ROUTING_GRAPH_SNAPSHOT_SHASUM}" ]; then
             echo "Bad hash"
             exit 1
           fi
+        env:
+          EXPECTED_ROUTING_GRAPH_SNAPSHOT_SHASUM: 05a5361278f68ee2afd086cc04a1f927a63924be451f3221d380533acfacc303
+      - name: Fetch rapid graph sync reference input
+        run: |
+          curl --verbose -L -o lightning-rapid-gossip-sync/res/full_graph.lngossip https://bitcoin.ninja/ldk-compressed_graph-bc08df7542-2022-05-05.bin
+          echo "Sha sum: $(sha256sum lightning-rapid-gossip-sync/res/full_graph.lngossip | awk '{ print $1 }')"
+          if [ "$(sha256sum lightning-rapid-gossip-sync/res/full_graph.lngossip | awk '{ print $1 }')" != "${EXPECTED_RAPID_GOSSIP_SHASUM}" ]; then
+            echo "Bad hash"
+            exit 1
+          fi
+        env:
+          EXPECTED_RAPID_GOSSIP_SHASUM: 9637b91cea9d64320cf48fc0787c70fe69fc062f90d3512e207044110cadfd7b
       - name: Test with Network Graph on Rust ${{ matrix.toolchain }}
         run: |
           cd lightning
index 6e03fc1ac4cfc4bab1feed65368651bc5135cb9e..f263dc8eccb16414c5e440b70c02bd6e9e4ab2df 100644 (file)
@@ -7,6 +7,7 @@ members = [
     "lightning-net-tokio",
     "lightning-persister",
     "lightning-background-processor",
+    "lightning-rapid-gossip-sync"
 ]
 
 exclude = [
index 88e577617b54faeab07ba0e61a213d29f726b1ba..66dabcfe4be3242eac2240a287476745c8f03e2a 100644 (file)
@@ -19,6 +19,7 @@ stdin_fuzz = []
 [dependencies]
 afl = { version = "0.4", optional = true }
 lightning = { path = "../lightning", features = ["regex"] }
+lightning-rapid-gossip-sync = { path = "../lightning-rapid-gossip-sync" }
 bitcoin = { version = "0.28.1", features = ["secp-lowmemory"] }
 hex = "0.3"
 honggfuzz = { version = "0.5", optional = true }
index eb07df6342f86dc6d99904953f376f414de5289f..72fefe51609103c7ed7c8cbf74ed2b33f6b9552f 100755 (executable)
@@ -10,6 +10,7 @@ GEN_TEST chanmon_deser
 GEN_TEST chanmon_consistency
 GEN_TEST full_stack
 GEN_TEST peer_crypt
+GEN_TEST process_network_graph
 GEN_TEST router
 GEN_TEST zbase32
 
diff --git a/fuzz/src/bin/process_network_graph_target.rs b/fuzz/src/bin/process_network_graph_target.rs
new file mode 100644 (file)
index 0000000..380efdf
--- /dev/null
@@ -0,0 +1,113 @@
+// This file is Copyright its original authors, visible in version control
+// history.
+//
+// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
+// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
+// You may not use this file except in accordance with one or both of these
+// licenses.
+
+// This file is auto-generated by gen_target.sh based on target_template.txt
+// To modify it, modify target_template.txt and run gen_target.sh instead.
+
+#![cfg_attr(feature = "libfuzzer_fuzz", no_main)]
+
+#[cfg(not(fuzzing))]
+compile_error!("Fuzz targets need cfg=fuzzing");
+
+extern crate lightning_fuzz;
+use lightning_fuzz::process_network_graph::*;
+
+#[cfg(feature = "afl")]
+#[macro_use] extern crate afl;
+#[cfg(feature = "afl")]
+fn main() {
+       fuzz!(|data| {
+               process_network_graph_run(data.as_ptr(), data.len());
+       });
+}
+
+#[cfg(feature = "honggfuzz")]
+#[macro_use] extern crate honggfuzz;
+#[cfg(feature = "honggfuzz")]
+fn main() {
+       loop {
+               fuzz!(|data| {
+                       process_network_graph_run(data.as_ptr(), data.len());
+               });
+       }
+}
+
+#[cfg(feature = "libfuzzer_fuzz")]
+#[macro_use] extern crate libfuzzer_sys;
+#[cfg(feature = "libfuzzer_fuzz")]
+fuzz_target!(|data: &[u8]| {
+       process_network_graph_run(data.as_ptr(), data.len());
+});
+
+#[cfg(feature = "stdin_fuzz")]
+fn main() {
+       use std::io::Read;
+
+       let mut data = Vec::with_capacity(8192);
+       std::io::stdin().read_to_end(&mut data).unwrap();
+       process_network_graph_run(data.as_ptr(), data.len());
+}
+
+#[test]
+fn run_test_cases() {
+       use std::fs;
+       use std::io::Read;
+       use lightning_fuzz::utils::test_logger::StringBuffer;
+
+       use std::sync::{atomic, Arc};
+       {
+               let data: Vec<u8> = vec![0];
+               process_network_graph_run(data.as_ptr(), data.len());
+       }
+       let mut threads = Vec::new();
+       let threads_running = Arc::new(atomic::AtomicUsize::new(0));
+       if let Ok(tests) = fs::read_dir("test_cases/process_network_graph") {
+               for test in tests {
+                       let mut data: Vec<u8> = Vec::new();
+                       let path = test.unwrap().path();
+                       fs::File::open(&path).unwrap().read_to_end(&mut data).unwrap();
+                       threads_running.fetch_add(1, atomic::Ordering::AcqRel);
+
+                       let thread_count_ref = Arc::clone(&threads_running);
+                       let main_thread_ref = std::thread::current();
+                       threads.push((path.file_name().unwrap().to_str().unwrap().to_string(),
+                               std::thread::spawn(move || {
+                                       let string_logger = StringBuffer::new();
+
+                                       let panic_logger = string_logger.clone();
+                                       let res = if ::std::panic::catch_unwind(move || {
+                                               process_network_graph_test(&data, panic_logger);
+                                       }).is_err() {
+                                               Some(string_logger.into_string())
+                                       } else { None };
+                                       thread_count_ref.fetch_sub(1, atomic::Ordering::AcqRel);
+                                       main_thread_ref.unpark();
+                                       res
+                               })
+                       ));
+                       while threads_running.load(atomic::Ordering::Acquire) > 32 {
+                               std::thread::park();
+                       }
+               }
+       }
+       let mut failed_outputs = Vec::new();
+       for (test, thread) in threads.drain(..) {
+               if let Some(output) = thread.join().unwrap() {
+                       println!("\nOutput of {}:\n{}\n", test, output);
+                       failed_outputs.push(test);
+               }
+       }
+       if !failed_outputs.is_empty() {
+               println!("Test cases which failed: ");
+               for case in failed_outputs {
+                       println!("{}", case);
+               }
+               panic!();
+       }
+}
index a0cc42b8189f9ae8837fb8ea64106c902185781d..5e158aee36ffe050e6f53176ddd1fc7887935871 100644 (file)
@@ -9,6 +9,7 @@
 
 extern crate bitcoin;
 extern crate lightning;
+extern crate lightning_rapid_gossip_sync;
 extern crate hex;
 
 pub mod utils;
@@ -17,6 +18,7 @@ pub mod chanmon_deser;
 pub mod chanmon_consistency;
 pub mod full_stack;
 pub mod peer_crypt;
+pub mod process_network_graph;
 pub mod router;
 pub mod zbase32;
 
diff --git a/fuzz/src/process_network_graph.rs b/fuzz/src/process_network_graph.rs
new file mode 100644 (file)
index 0000000..3f30335
--- /dev/null
@@ -0,0 +1,20 @@
+// Import that needs to be added manually
+use utils::test_logger;
+
+/// Actual fuzz test, method signature and name are fixed
+fn do_test(data: &[u8]) {
+       let block_hash = bitcoin::BlockHash::default();
+       let network_graph = lightning::routing::network_graph::NetworkGraph::new(block_hash);
+       lightning_rapid_gossip_sync::processing::update_network_graph(&network_graph, data);
+}
+
+/// Method that needs to be added manually, {name}_test
+pub fn process_network_graph_test<Out: test_logger::Output>(data: &[u8], _out: Out) {
+       do_test(data);
+}
+
+/// Method that needs to be added manually, {name}_run
+#[no_mangle]
+pub extern "C" fn process_network_graph_run(data: *const u8, datalen: usize) {
+       do_test(unsafe { std::slice::from_raw_parts(data, datalen) });
+}
index 786bfa3e589eb05dc393a807be90efc46c5febf7..80ea1f1bc73d6932e4ab3db1a2da8904b0f77f14 100644 (file)
@@ -222,6 +222,7 @@ pub fn do_test<Out: test_logger::Output>(data: &[u8], out: Out) {
                                                                channel_type: None,
                                                                short_channel_id: Some(scid),
                                                                inbound_scid_alias: None,
+                                                               outbound_scid_alias: None,
                                                                channel_value_satoshis: capacity,
                                                                user_channel_id: 0, inbound_capacity_msat: 0,
                                                                unspendable_punishment_reserve: None,
index 5d45e3d02388ea9a1659bf0cbb6e88e379d2f940..798fb66479519cf9b00a84ce3d5873787c449b58 100644 (file)
@@ -3,6 +3,7 @@ void chanmon_deser_run(const unsigned char* data, size_t data_len);
 void chanmon_consistency_run(const unsigned char* data, size_t data_len);
 void full_stack_run(const unsigned char* data, size_t data_len);
 void peer_crypt_run(const unsigned char* data, size_t data_len);
+void process_network_graph_run(const unsigned char* data, size_t data_len);
 void router_run(const unsigned char* data, size_t data_len);
 void zbase32_run(const unsigned char* data, size_t data_len);
 void msg_accept_channel_run(const unsigned char* data, size_t data_len);
index 9fcb4af1b624257b6818d1c121c4dd9f7c6c6eaa..008d3344b55c5f2becae9cfd553d1c3f3f84dc2a 100644 (file)
@@ -11,7 +11,7 @@
 #![cfg_attr(all(not(feature = "std"), not(test)), no_std)]
 
 //! This crate provides data structures to represent
-//! [lightning BOLT11](https://github.com/lightningnetwork/lightning-rfc/blob/master/11-payment-encoding.md)
+//! [lightning BOLT11](https://github.com/lightning/bolts/blob/master/11-payment-encoding.md)
 //! invoices and functions to create, encode and decode these. If you just want to use the standard
 //! en-/decoding functionality this should get you started:
 //!
@@ -795,18 +795,15 @@ impl SignedRawInvoice {
 /// variant. If no element was found `None` gets returned.
 ///
 /// The following example would extract the first B.
-/// ```
-/// use Enum::*
 ///
 /// enum Enum {
 ///    A(u8),
 ///    B(u16)
 /// }
 ///
-/// let elements = vec![A(1), A(2), B(3), A(4)]
+/// let elements = vec![Enum::A(1), Enum::A(2), Enum::B(3), Enum::A(4)];
 ///
-/// assert_eq!(find_extract!(elements.iter(), Enum::B(ref x), x), Some(3u16))
-/// ```
+/// assert_eq!(find_extract!(elements.iter(), Enum::B(x), x), Some(3u16));
 macro_rules! find_extract {
        ($iter:expr, $enm:pat, $enm_var:ident) => {
                find_all_extract!($iter, $enm, $enm_var).next()
@@ -817,20 +814,18 @@ macro_rules! find_extract {
 /// variant through an iterator.
 ///
 /// The following example would extract all A.
-/// ```
-/// use Enum::*
 ///
 /// enum Enum {
 ///    A(u8),
 ///    B(u16)
 /// }
 ///
-/// let elements = vec![A(1), A(2), B(3), A(4)]
+/// let elements = vec![Enum::A(1), Enum::A(2), Enum::B(3), Enum::A(4)];
 ///
 /// assert_eq!(
-///    find_all_extract!(elements.iter(), Enum::A(ref x), x).collect::<Vec<u8>>(),
-///    vec![1u8, 2u8, 4u8])
-/// ```
+///    find_all_extract!(elements.iter(), Enum::A(x), x).collect::<Vec<u8>>(),
+///    vec![1u8, 2u8, 4u8]
+/// );
 macro_rules! find_all_extract {
        ($iter:expr, $enm:pat, $enm_var:ident) => {
                $iter.filter_map(|tf| match *tf {
index d6a3abb0ddf60c8f8c0d47af92c30a7ca695e904..c095f74dd16e2e0dc9c3c41dacf7122c75e179a9 100644 (file)
@@ -38,9 +38,9 @@
 //! # use lightning::ln::{PaymentHash, PaymentPreimage, PaymentSecret};
 //! # use lightning::ln::channelmanager::{ChannelDetails, PaymentId, PaymentSendFailure};
 //! # use lightning::ln::msgs::LightningError;
-//! # use lightning::routing::scoring::Score;
 //! # use lightning::routing::network_graph::NodeId;
 //! # use lightning::routing::router::{Route, RouteHop, RouteParameters};
+//! # use lightning::routing::scoring::{ChannelUsage, Score};
 //! # use lightning::util::events::{Event, EventHandler, EventsProvider};
 //! # use lightning::util::logger::{Logger, Record};
 //! # use lightning::util::ser::{Writeable, Writer};
@@ -90,7 +90,7 @@
 //! # }
 //! # impl Score for FakeScorer {
 //! #     fn channel_penalty_msat(
-//! #         &self, _short_channel_id: u64, _send_amt: u64, _chan_amt: u64, _source: &NodeId, _target: &NodeId
+//! #         &self, _short_channel_id: u64, _source: &NodeId, _target: &NodeId, _usage: ChannelUsage
 //! #     ) -> u64 { 0 }
 //! #     fn payment_path_failed(&mut self, _path: &[&RouteHop], _short_channel_id: u64) {}
 //! #     fn payment_path_successful(&mut self, _path: &[&RouteHop]) {}
@@ -604,6 +604,7 @@ mod tests {
        use lightning::ln::msgs::{ChannelMessageHandler, ErrorAction, LightningError};
        use lightning::routing::network_graph::NodeId;
        use lightning::routing::router::{PaymentParameters, Route, RouteHop};
+       use lightning::routing::scoring::ChannelUsage;
        use lightning::util::test_utils::TestLogger;
        use lightning::util::errors::APIError;
        use lightning::util::events::{Event, EventsProvider, MessageSendEvent, MessageSendEventsProvider};
@@ -1444,7 +1445,7 @@ mod tests {
 
        impl Score for TestScorer {
                fn channel_penalty_msat(
-                       &self, _short_channel_id: u64, _send_amt: u64, _chan_amt: u64, _source: &NodeId, _target: &NodeId
+                       &self, _short_channel_id: u64, _source: &NodeId, _target: &NodeId, _usage: ChannelUsage
                ) -> u64 { 0 }
 
                fn payment_path_failed(&mut self, actual_path: &[&RouteHop], actual_short_channel_id: u64) {
diff --git a/lightning-rapid-gossip-sync/Cargo.toml b/lightning-rapid-gossip-sync/Cargo.toml
new file mode 100644 (file)
index 0000000..58446a4
--- /dev/null
@@ -0,0 +1,20 @@
+[package]
+name = "lightning-rapid-gossip-sync"
+version = "0.0.106"
+authors = ["Arik Sosman <git@arik.io>"]
+license = "MIT OR Apache-2.0"
+repository = "https://github.com/lightningdevkit/rust-lightning"
+edition = "2018"
+description = """
+Utility to process gossip routing data from the Rapid Gossip Sync Server.
+"""
+
+[features]
+_bench_unstable = []
+
+[dependencies]
+lightning = { version = "0.0.106", path = "../lightning" }
+bitcoin = { version = "0.28.1", default-features = false }
+
+[dev-dependencies]
+lightning = { version = "0.0.106", path = "../lightning", features = ["_test_utils"] }
diff --git a/lightning-rapid-gossip-sync/README.md b/lightning-rapid-gossip-sync/README.md
new file mode 100644 (file)
index 0000000..86d4981
--- /dev/null
@@ -0,0 +1,120 @@
+# lightning-rapid-gossip-sync
+
+This crate exposes functionality for rapid gossip graph syncing, aimed primarily at mobile clients.
+Its server counterpart is the
+[rapid-gossip-sync-server](https://github.com/lightningdevkit/rapid-gossip-sync-server) repository.
+
+## Mechanism
+
+The (presumed) server sends a compressed response containing gossip data. The data is formatted
+compactly, omitting signatures, and is opportunistically incremental where previous channel
+updates are known.
+
+Essentially, the serialization structure is as follows:
+
+1. Fixed prefix bytes `76, 68, 75, 1` (the first three bytes are ASCII for `LDK`)
+    - The purpose of this prefix is to identify the serialization format, should other rapid gossip
+      sync formats arise in the future
+    - The fourth byte is the protocol version in case our format gets updated
+2. Chain hash (32 bytes)
+3. Latest seen timestamp (`u32`)
+4. An unsigned int indicating the number of node IDs to follow
+5. An array of compressed node ID pubkeys (all pubkeys are presumed to be standard
+   compressed 33-byte-serializations)
+6. An unsigned int indicating the number of channel announcement messages to follow
+7. An array of significantly stripped down customized channel announcements
+8. An unsigned int indicating the number of channel update messages to follow
+9. A series of default values used for non-incremental channel updates
+    - The values are defined as follows:
+        1. `default_cltv_expiry_delta`
+        2. `default_htlc_minimum_msat`
+        3. `default_fee_base_msat`
+        4. `default_fee_proportional_millionths`
+        5. `default_htlc_maximum_msat` (`u64`, and if the default is no maximum, `u64::MAX`)
+    - The defaults are calculated by the server based on the frequency among non-incremental
+      updates within a given delta set
+10. An array of customized channel updates
+
+You will also notice that `NodeAnnouncement` messages are omitted altogether as the node IDs are
+implicitly extracted from the channel announcements and updates.
+
+The data is then applied to the current network graph. Each applied message is artificially dated
+to one week before the latest message, be it an announcement or an update, that the server has
+seen. The network graph should not be pruned until the graph sync completes.
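
For illustration, here is a minimal sketch of reading the header fields described above (prefix check, chain hash, latest-seen timestamp, one-week backdating), modeled on the `update_network_graph_from_byte_stream` function added in this PR. The helper name is hypothetical.

```rust
use std::io::Read;

use bitcoin::BlockHash;
use lightning::ln::msgs::DecodeError;
use lightning::util::ser::Readable;

/// Reads the fixed header of a rapid gossip sync blob and returns
/// (chain hash, latest seen timestamp, backdated timestamp).
fn read_header<R: Read>(read_cursor: &mut R) -> Result<(BlockHash, u32, u32), DecodeError> {
	let mut prefix = [0u8; 4];
	read_cursor.read_exact(&mut prefix).map_err(|e| DecodeError::Io(e.kind()))?;
	if prefix != [76, 68, 75, 1] {
		// unknown serialization format or protocol version
		return Err(DecodeError::UnknownVersion);
	}
	let chain_hash: BlockHash = Readable::read(read_cursor)?;
	let latest_seen_timestamp: u32 = Readable::read(read_cursor)?;
	// applied messages are artificially dated one week before the latest seen message
	let backdated_timestamp = latest_seen_timestamp.saturating_sub(7 * 24 * 3600);
	Ok((chain_hash, latest_seen_timestamp, backdated_timestamp))
}
```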
+
+### Custom Channel Announcement
+
+To achieve compactness and avoid data repetition, we're sending a significantly stripped down
+version of the channel announcement message, which contains only the following data:
+
+1. `channel_features`: `u16` + `n`, where `n` is the number of bytes indicated by the first `u16`
+2. `short_channel_id`: `CompactSize` (incremental `CompactSize` deltas starting from 0)
+3. `node_id_1_index`: `CompactSize` (index of node id within the previously sent sequence)
+4. `node_id_2_index`: `CompactSize` (index of node id within the previously sent sequence)
+
+### Custom Channel Update
+
+For the purpose of rapid syncing, we have deviated from the channel update format specified in
+BOLT 7 significantly. Our custom channel updates are structured as follows:
+
+1. `short_channel_id`: `CompactSize` (incremental `CompactSize` deltas starting at 0)
+2. `custom_channel_flags`: `u8`
+3. `update_data`
+
+Specifically, our custom channel flags break down like this:
+
+| 128                 | 64 | 32 | 16 | 8 | 4 | 2                | 1         |
+|---------------------|----|----|----|---|---|------------------|-----------|
+| Incremental update? |    |    |    |   |   | Disable channel? | Direction |
+
+If the most significant bit is set to `1`, indicating an incremental update, the intermediate bit
+flags assume the following meaning:
+
+| 64                              | 32                              | 16                          | 8                                         | 4                               |
+|---------------------------------|---------------------------------|-----------------------------|-------------------------------------------|---------------------------------|
+| `cltv_expiry_delta` has changed | `htlc_minimum_msat` has changed | `fee_base_msat` has changed | `fee_proportional_millionths` has changed | `htlc_maximum_msat` has changed |
+
+If the most significant bit is set to `0`, the meaning is almost identical, except instead of a
+change, the flags now represent a deviation from the defaults sent at the beginning of the update
+sequence.
+
+In both cases, `update_data` only contains the fields that are indicated by the channel flags to be
+non-default or to have mutated.
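
A small sketch of interpreting `custom_channel_flags`, using the same bit masks as the update loop in this PR's `processing.rs`; the helper name is illustrative.

```rust
/// Prints which update fields are present, per the flag tables above.
fn describe_channel_flags(channel_flags: u8) {
	let is_incremental = channel_flags & 0b_1000_0000 != 0;
	// the lowest two bits (direction, disable) are always sent in full
	let standard_channel_flags = channel_flags & 0b_0000_0011;
	// each set bit means the corresponding field follows in `update_data`
	let has_cltv_expiry_delta = channel_flags & 0b_0100_0000 != 0;
	let has_htlc_minimum_msat = channel_flags & 0b_0010_0000 != 0;
	let has_fee_base_msat = channel_flags & 0b_0001_0000 != 0;
	let has_fee_proportional_millionths = channel_flags & 0b_0000_1000 != 0;
	let has_htlc_maximum_msat = channel_flags & 0b_0000_0100 != 0;
	println!(
		"incremental: {}, standard flags: {:#b}, fields present: cltv {}, htlc_min {}, fee_base {}, fee_prop {}, htlc_max {}",
		is_incremental, standard_channel_flags, has_cltv_expiry_delta, has_htlc_minimum_msat,
		has_fee_base_msat, has_fee_proportional_millionths, has_htlc_maximum_msat
	);
}
```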
+
+## Delta Calculation
+
+The way a server is meant to calculate this rapid gossip sync data is by taking the latest time
+any change, be it an announcement or an update, was seen. That timestamp is included in each
+rapid sync message, so all the client needs to do is cache one variable.
+
+If a particular channel update had never occurred before, the full update is sent. If a channel has
+had updates prior to the provided timestamp, the latest update prior to the timestamp is taken as a
+reference, and the delta is calculated against it.
+
+Depending on whether the rapid sync message is calculated on the fly or a snapshotted version is
+returned, intermediate changes between the latest update seen by the client and the latest update
+broadcast on the network may be taken into account when calculating the delta.
+
+## Performance
+
+Given that the primary purpose of this utility is faster graph sync, we thought it might be helpful
+to provide some examples of various delta sets. These examples were calculated as of May 19th, 2022,
+with a network graph comprising 80,000 channel announcements and 160,000 directed channel updates.
+
+| Full sync                   |        |
+|-----------------------------|--------|
+| Message Length              | 4.7 MB |
+| Gzipped Message Length      | 2.0 MB |
+| Client-side Processing Time | 1.4 s  |
+
+| Week-old sync               |        |
+|-----------------------------|--------|
+| Message Length              | 2.7 MB |
+| Gzipped Message Length      | 862 kB |
+| Client-side Processing Time | 907 ms |
+
+| Day-old sync                |         |
+|-----------------------------|---------|
+| Message Length              | 191 kB  |
+| Gzipped Message Length      | 92.8 kB |
+| Client-side Processing Time | 196 ms  |
diff --git a/lightning-rapid-gossip-sync/res/.gitignore b/lightning-rapid-gossip-sync/res/.gitignore
new file mode 100644 (file)
index 0000000..d6b7ef3
--- /dev/null
@@ -0,0 +1,2 @@
+*
+!.gitignore
diff --git a/lightning-rapid-gossip-sync/src/error.rs b/lightning-rapid-gossip-sync/src/error.rs
new file mode 100644 (file)
index 0000000..fee8fea
--- /dev/null
@@ -0,0 +1,40 @@
+use core::fmt::Debug;
+use std::fmt::Formatter;
+use lightning::ln::msgs::{DecodeError, LightningError};
+
+/// All-encompassing standard error type that processing can return
+pub enum GraphSyncError {
+       /// Error trying to read the update data, typically due to an erroneous data length indication
+       /// that is greater than the actual amount of data provided
+       DecodeError(DecodeError),
+	/// Error applying the patch to the network graph, usually the result of updates that are too
+	/// old or that arrive out of order, missing the prerequisite data they build upon
+       LightningError(LightningError),
+}
+
+impl From<std::io::Error> for GraphSyncError {
+       fn from(error: std::io::Error) -> Self {
+               Self::DecodeError(DecodeError::Io(error.kind()))
+       }
+}
+
+impl From<DecodeError> for GraphSyncError {
+       fn from(error: DecodeError) -> Self {
+               Self::DecodeError(error)
+       }
+}
+
+impl From<LightningError> for GraphSyncError {
+       fn from(error: LightningError) -> Self {
+               Self::LightningError(error)
+       }
+}
+
+impl Debug for GraphSyncError {
+       fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
+               match self {
+                       GraphSyncError::DecodeError(e) => f.write_fmt(format_args!("DecodeError: {:?}", e)),
+                       GraphSyncError::LightningError(e) => f.write_fmt(format_args!("LightningError: {:?}", e))
+               }
+       }
+}
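
A brief, hypothetical sketch of how a caller might distinguish the two `GraphSyncError` variants returned by the sync functions; the helper name and the handling are illustrative only.

```rust
use lightning::routing::network_graph::NetworkGraph;
use lightning_rapid_gossip_sync::error::GraphSyncError;
use lightning_rapid_gossip_sync::processing::update_network_graph;

/// Applies an update blob and reports the outcome by error variant.
fn apply_update(network_graph: &NetworkGraph, update_data: &[u8]) {
	match update_network_graph(network_graph, update_data) {
		Ok(last_sync_timestamp) => {
			// persist this value and pass it to the server on the next sync
			println!("synced up to {}", last_sync_timestamp);
		}
		Err(GraphSyncError::DecodeError(e)) => {
			// the update data itself could not be read
			eprintln!("malformed update data: {:?}", e);
		}
		Err(GraphSyncError::LightningError(e)) => {
			// the data was readable but could not be applied to the graph
			eprintln!("could not apply update: {}", e.err);
		}
	}
}
```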
diff --git a/lightning-rapid-gossip-sync/src/lib.rs b/lightning-rapid-gossip-sync/src/lib.rs
new file mode 100644 (file)
index 0000000..123f323
--- /dev/null
@@ -0,0 +1,244 @@
+#![deny(missing_docs)]
+#![deny(unsafe_code)]
+#![deny(broken_intra_doc_links)]
+#![deny(non_upper_case_globals)]
+#![deny(non_camel_case_types)]
+#![deny(non_snake_case)]
+#![deny(unused_mut)]
+#![deny(unused_variables)]
+#![deny(unused_imports)]
+//! This crate exposes functionality to rapidly sync gossip data, aimed primarily at mobile
+//! devices.
+//!
+//! The server sends a compressed response containing differential gossip data. The gossip data is
+//! formatted compactly, omitting signatures and opportunistically incremental where previous
+//! channel updates are known (a mechanism that is enabled when the timestamp of the last known
+//! channel update is communicated). A reference server implementation can be found
+//! [here](https://github.com/lightningdevkit/rapid-gossip-sync-server).
+//!
+//! An example server request could look as simple as the following. Note that the first ever rapid
+//! sync should use `0` for `last_sync_timestamp`:
+//!
+//! ```shell
+//! curl -o rapid_sync.lngossip https://rapidsync.lightningdevkit.org/snapshot/<last_sync_timestamp>
+//! ```
+//!
+//! Then, call the network processing function. In this example, we process the update by reading
+//! its contents from disk, which we do by calling the `sync_network_graph_with_file_path` method:
+//!
+//! ```
+//! use bitcoin::blockdata::constants::genesis_block;
+//! use bitcoin::Network;
+//! use lightning::routing::network_graph::NetworkGraph;
+//!
+//! let block_hash = genesis_block(Network::Bitcoin).header.block_hash();
+//! let network_graph = NetworkGraph::new(block_hash);
+//! let new_last_sync_timestamp_result = lightning_rapid_gossip_sync::sync_network_graph_with_file_path(&network_graph, "./rapid_sync.lngossip");
+//! ```
+//!
+//! The primary benefit this syncing mechanism provides is that, given a trusted server, a
+//! low-powered client can offload the validation of gossip signatures. This enables a client to
+//! privately calculate routes for payments, and to do so much faster and earlier than it could by
+//! waiting for a full peer-to-peer gossip sync to complete.
+//!
+//! The reason the rapid sync server requires trust is that it could provide bogus data, though at
+//! worst, all that would result in is a fake network topology, which wouldn't enable the server to
+//! steal or siphon off funds. It could, however, reduce the client's privacy by forcing all
+//! payments to be routed via channels the server controls.
+//!
+//! The way a server is meant to calculate this rapid gossip sync data is by using a `latest_seen`
+//! timestamp provided by the client. It is not taken from the channel announcements or updates
+//! themselves (announcements carry no timestamp at all, only a block height); rather, it is the
+//! timestamp at which the server saw a particular message.
+
+// Allow and import test features for benching
+#![cfg_attr(all(test, feature = "_bench_unstable"), feature(test))]
+#[cfg(all(test, feature = "_bench_unstable"))]
+extern crate test;
+
+use std::fs::File;
+
+use lightning::routing::network_graph;
+
+use crate::error::GraphSyncError;
+
+/// Error types that these functions can return
+pub mod error;
+
+/// Core functionality of this crate
+pub mod processing;
+
+/// Sync gossip data from a file
+/// Returns the last sync timestamp to be used the next time rapid sync data is queried.
+///
+/// `network_graph`: The network graph to apply the updates to
+///
+/// `sync_path`: Path to the file where the gossip update data is located
+///
+pub fn sync_network_graph_with_file_path(
+       network_graph: &network_graph::NetworkGraph,
+       sync_path: &str,
+) -> Result<u32, GraphSyncError> {
+       let mut file = File::open(sync_path)?;
+       processing::update_network_graph_from_byte_stream(&network_graph, &mut file)
+}
+
+#[cfg(test)]
+mod tests {
+       use std::fs;
+
+       use bitcoin::blockdata::constants::genesis_block;
+       use bitcoin::Network;
+
+       use lightning::ln::msgs::DecodeError;
+       use lightning::routing::network_graph::NetworkGraph;
+
+       use crate::sync_network_graph_with_file_path;
+
+       #[test]
+       fn test_sync_from_file() {
+               struct FileSyncTest {
+                       directory: String,
+               }
+
+               impl FileSyncTest {
+                       fn new(tmp_directory: &str, valid_response: &[u8]) -> FileSyncTest {
+                               let test = FileSyncTest { directory: tmp_directory.to_owned() };
+
+                               let graph_sync_test_directory = test.get_test_directory();
+                               fs::create_dir_all(graph_sync_test_directory).unwrap();
+
+                               let graph_sync_test_file = test.get_test_file_path();
+                               fs::write(&graph_sync_test_file, valid_response).unwrap();
+
+                               test
+                       }
+                       fn get_test_directory(&self) -> String {
+                               let graph_sync_test_directory = self.directory.clone() + "/graph-sync-tests";
+                               graph_sync_test_directory
+                       }
+                       fn get_test_file_path(&self) -> String {
+                               let graph_sync_test_directory = self.get_test_directory();
+                               let graph_sync_test_file = graph_sync_test_directory.to_owned() + "/test_data.lngossip";
+                               graph_sync_test_file
+                       }
+               }
+
+               impl Drop for FileSyncTest {
+                       fn drop(&mut self) {
+                               fs::remove_dir_all(self.directory.clone()).unwrap();
+                       }
+               }
+
+               // same as incremental_only_update_fails_without_prior_same_direction_updates
+               let valid_response = vec![
+                       76, 68, 75, 1, 111, 226, 140, 10, 182, 241, 179, 114, 193, 166, 162, 70, 174, 99, 247,
+                       79, 147, 30, 131, 101, 225, 90, 8, 156, 104, 214, 25, 0, 0, 0, 0, 0, 97, 227, 98, 218,
+                       0, 0, 0, 4, 2, 22, 7, 207, 206, 25, 164, 197, 231, 230, 231, 56, 102, 61, 250, 251,
+                       187, 172, 38, 46, 79, 247, 108, 44, 155, 48, 219, 238, 252, 53, 192, 6, 67, 2, 36, 125,
+                       157, 176, 223, 175, 234, 116, 94, 248, 201, 225, 97, 235, 50, 47, 115, 172, 63, 136,
+                       88, 216, 115, 11, 111, 217, 114, 84, 116, 124, 231, 107, 2, 158, 1, 242, 121, 152, 106,
+                       204, 131, 186, 35, 93, 70, 216, 10, 237, 224, 183, 89, 95, 65, 3, 83, 185, 58, 138,
+                       181, 64, 187, 103, 127, 68, 50, 2, 201, 19, 17, 138, 136, 149, 185, 226, 156, 137, 175,
+                       110, 32, 237, 0, 217, 90, 31, 100, 228, 149, 46, 219, 175, 168, 77, 4, 143, 38, 128,
+                       76, 97, 0, 0, 0, 2, 0, 0, 255, 8, 153, 192, 0, 2, 27, 0, 0, 0, 1, 0, 0, 255, 2, 68,
+                       226, 0, 6, 11, 0, 1, 2, 3, 0, 0, 0, 2, 0, 40, 0, 0, 0, 0, 0, 0, 3, 232, 0, 0, 3, 232,
+                       0, 0, 0, 1, 0, 0, 0, 0, 58, 85, 116, 216, 255, 8, 153, 192, 0, 2, 27, 0, 0, 25, 0, 0,
+                       0, 1, 0, 0, 0, 125, 255, 2, 68, 226, 0, 6, 11, 0, 1, 5, 0, 0, 0, 0, 29, 129, 25, 192,
+               ];
+
+               let tmp_directory = "./rapid-gossip-sync-tests-tmp";
+               let sync_test = FileSyncTest::new(tmp_directory, &valid_response);
+               let graph_sync_test_file = sync_test.get_test_file_path();
+
+               let block_hash = genesis_block(Network::Bitcoin).block_hash();
+               let network_graph = NetworkGraph::new(block_hash);
+
+               assert_eq!(network_graph.read_only().channels().len(), 0);
+
+               let sync_result = sync_network_graph_with_file_path(&network_graph, &graph_sync_test_file);
+
+               if sync_result.is_err() {
+                       panic!("Unexpected sync result: {:?}", sync_result)
+               }
+
+               assert_eq!(network_graph.read_only().channels().len(), 2);
+               let after = network_graph.to_string();
+               assert!(
+                       after.contains("021607cfce19a4c5e7e6e738663dfafbbbac262e4ff76c2c9b30dbeefc35c00643")
+               );
+               assert!(
+                       after.contains("02247d9db0dfafea745ef8c9e161eb322f73ac3f8858d8730b6fd97254747ce76b")
+               );
+               assert!(
+                       after.contains("029e01f279986acc83ba235d46d80aede0b7595f410353b93a8ab540bb677f4432")
+               );
+               assert!(
+                       after.contains("02c913118a8895b9e29c89af6e20ed00d95a1f64e4952edbafa84d048f26804c61")
+               );
+               assert!(after.contains("619737530008010752"));
+               assert!(after.contains("783241506229452801"));
+       }
+
+       #[test]
+       fn measure_native_read_from_file() {
+               let block_hash = genesis_block(Network::Bitcoin).block_hash();
+               let network_graph = NetworkGraph::new(block_hash);
+
+               assert_eq!(network_graph.read_only().channels().len(), 0);
+
+               let start = std::time::Instant::now();
+               let sync_result =
+                       sync_network_graph_with_file_path(&network_graph, "./res/full_graph.lngossip");
+               if let Err(crate::error::GraphSyncError::DecodeError(DecodeError::Io(io_error))) = &sync_result {
+                       let error_string = format!("Input file lightning-graph-sync/res/full_graph.lngossip is missing! Download it from https://bitcoin.ninja/ldk-compressed_graph-bc08df7542-2022-05-05.bin\n\n{:?}", io_error);
+                       #[cfg(not(require_route_graph_test))]
+                       {
+                               println!("{}", error_string);
+                               return;
+                       }
+                       #[cfg(require_route_graph_test)]
+                       panic!("{}", error_string);
+               }
+               let elapsed = start.elapsed();
+               println!("initialization duration: {:?}", elapsed);
+               if sync_result.is_err() {
+                       panic!("Unexpected sync result: {:?}", sync_result)
+               }
+       }
+}
+
+#[cfg(all(test, feature = "_bench_unstable"))]
+pub mod bench {
+       use test::Bencher;
+
+       use bitcoin::blockdata::constants::genesis_block;
+       use bitcoin::Network;
+
+       use lightning::ln::msgs::DecodeError;
+       use lightning::routing::network_graph::NetworkGraph;
+
+       use crate::sync_network_graph_with_file_path;
+
+       #[bench]
+       fn bench_reading_full_graph_from_file(b: &mut Bencher) {
+               let block_hash = genesis_block(Network::Bitcoin).block_hash();
+               b.iter(|| {
+                       let network_graph = NetworkGraph::new(block_hash);
+                       let sync_result = sync_network_graph_with_file_path(
+                               &network_graph,
+                               "./res/full_graph.lngossip",
+                       );
+                       if let Err(crate::error::GraphSyncError::DecodeError(DecodeError::Io(io_error))) = &sync_result {
+                               let error_string = format!("Input file lightning-graph-sync/res/full_graph.lngossip is missing! Download it from https://bitcoin.ninja/ldk-compressed_graph-bc08df7542-2022-05-05.bin\n\n{:?}", io_error);
+                               #[cfg(not(require_route_graph_test))]
+                               {
+                                       println!("{}", error_string);
+                                       return;
+                               }
+                               panic!("{}", error_string);
+                       }
+                       assert!(sync_result.is_ok())
+               });
+       }
+}
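
A hypothetical sketch of chaining syncs as described in the crate documentation above: the returned timestamp is persisted and used to build the next snapshot URL. The helper names are assumptions; only `sync_network_graph_with_file_path`, `NetworkGraph::new`, and the snapshot URL come from this PR.

```rust
use bitcoin::blockdata::constants::genesis_block;
use bitcoin::Network;
use lightning::routing::network_graph::NetworkGraph;
use lightning_rapid_gossip_sync::error::GraphSyncError;
use lightning_rapid_gossip_sync::sync_network_graph_with_file_path;

/// Applies a downloaded snapshot and returns the URL to request next time.
fn next_snapshot_url(
	network_graph: &NetworkGraph, downloaded_file: &str,
) -> Result<String, GraphSyncError> {
	let last_sync_timestamp = sync_network_graph_with_file_path(network_graph, downloaded_file)?;
	// the first ever sync would have used 0 here; subsequent syncs use the returned value
	Ok(format!("https://rapidsync.lightningdevkit.org/snapshot/{}", last_sync_timestamp))
}

fn main() -> Result<(), GraphSyncError> {
	let block_hash = genesis_block(Network::Bitcoin).header.block_hash();
	let network_graph = NetworkGraph::new(block_hash);
	let url = next_snapshot_url(&network_graph, "./rapid_sync.lngossip")?;
	println!("next snapshot: {}", url);
	Ok(())
}
```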
diff --git a/lightning-rapid-gossip-sync/src/processing.rs b/lightning-rapid-gossip-sync/src/processing.rs
new file mode 100644 (file)
index 0000000..ceb8b82
--- /dev/null
@@ -0,0 +1,499 @@
+use std::cmp::max;
+use std::io;
+use std::io::Read;
+
+use bitcoin::BlockHash;
+use bitcoin::secp256k1::PublicKey;
+
+use lightning::ln::msgs::{
+       DecodeError, ErrorAction, LightningError, OptionalField, UnsignedChannelUpdate,
+};
+use lightning::routing::network_graph;
+use lightning::util::ser::{BigSize, Readable};
+
+use crate::error::GraphSyncError;
+
+/// The purpose of this prefix is to identify the serialization format, should other rapid gossip
+/// sync formats arise in the future.
+///
+/// The fourth byte is the protocol version in case our format gets updated.
+const GOSSIP_PREFIX: [u8; 4] = [76, 68, 75, 1];
+
+/// Maximum vector allocation capacity for distinct node IDs. This constraint is necessary to
+/// avoid malicious updates being able to trigger excessive memory allocation.
+const MAX_INITIAL_NODE_ID_VECTOR_CAPACITY: u32 = 50_000;
+
+/// Update network graph from binary data.
+/// Returns the last sync timestamp to be used the next time rapid sync data is queried.
+///
+/// `network_graph`: network graph to be updated
+///
+/// `update_data`: `&[u8]` binary stream that comprises the update data
+pub fn update_network_graph(
+       network_graph: &network_graph::NetworkGraph,
+       update_data: &[u8],
+) -> Result<u32, GraphSyncError> {
+       let mut read_cursor = io::Cursor::new(update_data);
+       update_network_graph_from_byte_stream(&network_graph, &mut read_cursor)
+}
+
+pub(crate) fn update_network_graph_from_byte_stream<R: Read>(
+       network_graph: &network_graph::NetworkGraph,
+       mut read_cursor: &mut R,
+) -> Result<u32, GraphSyncError> {
+       let mut prefix = [0u8; 4];
+       read_cursor.read_exact(&mut prefix)?;
+
+       match prefix {
+               GOSSIP_PREFIX => {},
+               _ => {
+                       return Err(DecodeError::UnknownVersion.into());
+               }
+       };
+
+       let chain_hash: BlockHash = Readable::read(read_cursor)?;
+       let latest_seen_timestamp: u32 = Readable::read(read_cursor)?;
+       // backdate the applied timestamp by a week
+       let backdated_timestamp = latest_seen_timestamp.saturating_sub(24 * 3600 * 7);
+
+       let node_id_count: u32 = Readable::read(read_cursor)?;
+       let mut node_ids: Vec<PublicKey> = Vec::with_capacity(std::cmp::min(
+               node_id_count,
+               MAX_INITIAL_NODE_ID_VECTOR_CAPACITY,
+       ) as usize);
+       for _ in 0..node_id_count {
+               let current_node_id = Readable::read(read_cursor)?;
+               node_ids.push(current_node_id);
+       }
+
+       let mut previous_scid: u64 = 0;
+       let announcement_count: u32 = Readable::read(read_cursor)?;
+       for _ in 0..announcement_count {
+               let features = Readable::read(read_cursor)?;
+
+               // handle SCID
+               let scid_delta: BigSize = Readable::read(read_cursor)?;
+               let short_channel_id = previous_scid
+                       .checked_add(scid_delta.0)
+                       .ok_or(DecodeError::InvalidValue)?;
+               previous_scid = short_channel_id;
+
+               let node_id_1_index: BigSize = Readable::read(read_cursor)?;
+               let node_id_2_index: BigSize = Readable::read(read_cursor)?;
+               if max(node_id_1_index.0, node_id_2_index.0) >= node_id_count as u64 {
+                       return Err(DecodeError::InvalidValue.into());
+               };
+               let node_id_1 = node_ids[node_id_1_index.0 as usize];
+               let node_id_2 = node_ids[node_id_2_index.0 as usize];
+
+               let announcement_result = network_graph.add_channel_from_partial_announcement(
+                       short_channel_id,
+                       backdated_timestamp as u64,
+                       features,
+                       node_id_1,
+                       node_id_2,
+               );
+               if let Err(lightning_error) = announcement_result {
+                       if let ErrorAction::IgnoreDuplicateGossip = lightning_error.action {
+                               // everything is fine, just a duplicate channel announcement
+                       } else {
+                               return Err(lightning_error.into());
+                       }
+               }
+       }
+
+       previous_scid = 0; // updates start at a new scid
+
+       let update_count: u32 = Readable::read(read_cursor)?;
+       if update_count == 0 {
+               return Ok(latest_seen_timestamp);
+       }
+
+       // obtain default values for non-incremental updates
+       let default_cltv_expiry_delta: u16 = Readable::read(&mut read_cursor)?;
+       let default_htlc_minimum_msat: u64 = Readable::read(&mut read_cursor)?;
+       let default_fee_base_msat: u32 = Readable::read(&mut read_cursor)?;
+       let default_fee_proportional_millionths: u32 = Readable::read(&mut read_cursor)?;
+       let tentative_default_htlc_maximum_msat: u64 = Readable::read(&mut read_cursor)?;
+       let default_htlc_maximum_msat = if tentative_default_htlc_maximum_msat == u64::max_value() {
+               OptionalField::Absent
+       } else {
+               OptionalField::Present(tentative_default_htlc_maximum_msat)
+       };
+
+       for _ in 0..update_count {
+               let scid_delta: BigSize = Readable::read(read_cursor)?;
+               let short_channel_id = previous_scid
+                       .checked_add(scid_delta.0)
+                       .ok_or(DecodeError::InvalidValue)?;
+               previous_scid = short_channel_id;
+
+               let channel_flags: u8 = Readable::read(read_cursor)?;
+
+               // flags are always sent in full, and hence always need updating
+               let standard_channel_flags = channel_flags & 0b_0000_0011;
+
+               let mut synthetic_update = if channel_flags & 0b_1000_0000 == 0 {
+                       // full update, field flags will indicate deviations from the default
+                       UnsignedChannelUpdate {
+                               chain_hash,
+                               short_channel_id,
+                               timestamp: backdated_timestamp,
+                               flags: standard_channel_flags,
+                               cltv_expiry_delta: default_cltv_expiry_delta,
+                               htlc_minimum_msat: default_htlc_minimum_msat,
+                               htlc_maximum_msat: default_htlc_maximum_msat.clone(),
+                               fee_base_msat: default_fee_base_msat,
+                               fee_proportional_millionths: default_fee_proportional_millionths,
+                               excess_data: vec![],
+                       }
+               } else {
+                       // incremental update, field flags will indicate mutated values
+                       let read_only_network_graph = network_graph.read_only();
+                       let channel = read_only_network_graph
+                               .channels()
+                               .get(&short_channel_id)
+                               .ok_or(LightningError {
+                                       err: "Couldn't find channel for update".to_owned(),
+                                       action: ErrorAction::IgnoreError,
+                               })?;
+
+                       let directional_info = channel
+                               .get_directional_info(channel_flags)
+                               .ok_or(LightningError {
+                                       err: "Couldn't find previous directional data for update".to_owned(),
+                                       action: ErrorAction::IgnoreError,
+                               })?;
+
+                       let htlc_maximum_msat =
+                               if let Some(htlc_maximum_msat) = directional_info.htlc_maximum_msat {
+                                       OptionalField::Present(htlc_maximum_msat)
+                               } else {
+                                       OptionalField::Absent
+                               };
+
+                       UnsignedChannelUpdate {
+                               chain_hash,
+                               short_channel_id,
+                               timestamp: backdated_timestamp,
+                               flags: standard_channel_flags,
+                               cltv_expiry_delta: directional_info.cltv_expiry_delta,
+                               htlc_minimum_msat: directional_info.htlc_minimum_msat,
+                               htlc_maximum_msat,
+                               fee_base_msat: directional_info.fees.base_msat,
+                               fee_proportional_millionths: directional_info.fees.proportional_millionths,
+                               excess_data: vec![],
+                       }
+               };
+
+               if channel_flags & 0b_0100_0000 > 0 {
+                       let cltv_expiry_delta: u16 = Readable::read(read_cursor)?;
+                       synthetic_update.cltv_expiry_delta = cltv_expiry_delta;
+               }
+
+               if channel_flags & 0b_0010_0000 > 0 {
+                       let htlc_minimum_msat: u64 = Readable::read(read_cursor)?;
+                       synthetic_update.htlc_minimum_msat = htlc_minimum_msat;
+               }
+
+               if channel_flags & 0b_0001_0000 > 0 {
+                       let fee_base_msat: u32 = Readable::read(read_cursor)?;
+                       synthetic_update.fee_base_msat = fee_base_msat;
+               }
+
+               if channel_flags & 0b_0000_1000 > 0 {
+                       let fee_proportional_millionths: u32 = Readable::read(read_cursor)?;
+                       synthetic_update.fee_proportional_millionths = fee_proportional_millionths;
+               }
+
+               if channel_flags & 0b_0000_0100 > 0 {
+                       let tentative_htlc_maximum_msat: u64 = Readable::read(read_cursor)?;
+                       synthetic_update.htlc_maximum_msat = if tentative_htlc_maximum_msat == u64::max_value()
+                       {
+                               OptionalField::Absent
+                       } else {
+                               OptionalField::Present(tentative_htlc_maximum_msat)
+                       };
+               }
+
+               network_graph.update_channel_unsigned(&synthetic_update)?;
+       }
+
+       Ok(latest_seen_timestamp)
+}
+
+#[cfg(test)]
+mod tests {
+       use bitcoin::blockdata::constants::genesis_block;
+       use bitcoin::Network;
+
+       use lightning::ln::msgs::DecodeError;
+       use lightning::routing::network_graph::NetworkGraph;
+
+       use crate::error::GraphSyncError;
+       use crate::processing::update_network_graph;
+
+       #[test]
+       fn network_graph_fails_to_update_from_clipped_input() {
+               let block_hash = genesis_block(Network::Bitcoin).block_hash();
+               let network_graph = NetworkGraph::new(block_hash);
+
+               let example_input = vec![
+                       76, 68, 75, 1, 111, 226, 140, 10, 182, 241, 179, 114, 193, 166, 162, 70, 174, 99, 247,
+                       79, 147, 30, 131, 101, 225, 90, 8, 156, 104, 214, 25, 0, 0, 0, 0, 0, 97, 227, 98, 218,
+                       0, 0, 0, 4, 2, 22, 7, 207, 206, 25, 164, 197, 231, 230, 231, 56, 102, 61, 250, 251,
+                       187, 172, 38, 46, 79, 247, 108, 44, 155, 48, 219, 238, 252, 53, 192, 6, 67, 2, 36, 125,
+                       157, 176, 223, 175, 234, 116, 94, 248, 201, 225, 97, 235, 50, 47, 115, 172, 63, 136,
+                       88, 216, 115, 11, 111, 217, 114, 84, 116, 124, 231, 107, 2, 158, 1, 242, 121, 152, 106,
+                       204, 131, 186, 35, 93, 70, 216, 10, 237, 224, 183, 89, 95, 65, 3, 83, 185, 58, 138,
+                       181, 64, 187, 103, 127, 68, 50, 2, 201, 19, 17, 138, 136, 149, 185, 226, 156, 137, 175,
+                       110, 32, 237, 0, 217, 90, 31, 100, 228, 149, 46, 219, 175, 168, 77, 4, 143, 38, 128,
+                       76, 97, 0, 0, 0, 2, 0, 0, 255, 8, 153, 192, 0, 2, 27, 0, 0, 0, 1, 0, 0, 255, 2, 68,
+                       226, 0, 6, 11, 0, 1, 2, 3, 0, 0, 0, 2, 0, 40, 0, 0, 0, 0, 0, 0, 3, 232, 0, 0, 0, 100,
+                       0, 0, 2, 224, 0, 0, 0, 0, 29, 129, 25, 192, 255, 8, 153, 192, 0, 2, 27, 0, 0, 36, 0, 0,
+                       0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 58, 85, 116, 216, 255, 2, 68, 226, 0, 6, 11, 0, 1, 24, 0,
+                       0, 3, 232, 0, 0, 0,
+               ];
+               let update_result = update_network_graph(&network_graph, &example_input[..]);
+               assert!(update_result.is_err());
+               if let Err(GraphSyncError::DecodeError(DecodeError::ShortRead)) = update_result {
+                       // this is the expected error type
+               } else {
+                       panic!("Unexpected update result: {:?}", update_result)
+               }
+       }
+
+       #[test]
+       fn incremental_only_update_fails_without_prior_announcements() {
+               let incremental_update_input = vec![
+                       76, 68, 75, 1, 111, 226, 140, 10, 182, 241, 179, 114, 193, 166, 162, 70, 174, 99, 247,
+                       79, 147, 30, 131, 101, 225, 90, 8, 156, 104, 214, 25, 0, 0, 0, 0, 0, 97, 229, 183, 167,
+                       0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+                       0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 8, 153, 192, 0, 2, 27, 0, 0, 136, 0, 0, 0, 221, 255, 2,
+                       68, 226, 0, 6, 11, 0, 1, 128,
+               ];
+
+               let block_hash = genesis_block(Network::Bitcoin).block_hash();
+               let network_graph = NetworkGraph::new(block_hash);
+
+               assert_eq!(network_graph.read_only().channels().len(), 0);
+
+               let update_result = update_network_graph(&network_graph, &incremental_update_input[..]);
+               assert!(update_result.is_err());
+               if let Err(GraphSyncError::LightningError(lightning_error)) = update_result {
+                       assert_eq!(lightning_error.err, "Couldn't find channel for update");
+               } else {
+                       panic!("Unexpected update result: {:?}", update_result)
+               }
+       }
+
+       #[test]
+       fn incremental_only_update_fails_without_prior_updates() {
+               let announced_update_input = vec![
+                       76, 68, 75, 1, 111, 226, 140, 10, 182, 241, 179, 114, 193, 166, 162, 70, 174, 99, 247,
+                       79, 147, 30, 131, 101, 225, 90, 8, 156, 104, 214, 25, 0, 0, 0, 0, 0, 97, 229, 183, 167,
+                       0, 0, 0, 4, 2, 22, 7, 207, 206, 25, 164, 197, 231, 230, 231, 56, 102, 61, 250, 251,
+                       187, 172, 38, 46, 79, 247, 108, 44, 155, 48, 219, 238, 252, 53, 192, 6, 67, 2, 36, 125,
+                       157, 176, 223, 175, 234, 116, 94, 248, 201, 225, 97, 235, 50, 47, 115, 172, 63, 136,
+                       88, 216, 115, 11, 111, 217, 114, 84, 116, 124, 231, 107, 2, 158, 1, 242, 121, 152, 106,
+                       204, 131, 186, 35, 93, 70, 216, 10, 237, 224, 183, 89, 95, 65, 3, 83, 185, 58, 138,
+                       181, 64, 187, 103, 127, 68, 50, 2, 201, 19, 17, 138, 136, 149, 185, 226, 156, 137, 175,
+                       110, 32, 237, 0, 217, 90, 31, 100, 228, 149, 46, 219, 175, 168, 77, 4, 143, 38, 128,
+                       76, 97, 0, 0, 0, 2, 0, 0, 255, 8, 153, 192, 0, 2, 27, 0, 0, 0, 1, 0, 0, 255, 2, 68,
+                       226, 0, 6, 11, 0, 1, 2, 3, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+                       0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 8, 153, 192, 0, 2, 27, 0, 0, 136, 0, 0, 0, 221, 255,
+                       2, 68, 226, 0, 6, 11, 0, 1, 128,
+               ];
+
+               let block_hash = genesis_block(Network::Bitcoin).block_hash();
+               let network_graph = NetworkGraph::new(block_hash);
+
+               assert_eq!(network_graph.read_only().channels().len(), 0);
+
+               let update_result = update_network_graph(&network_graph, &announced_update_input[..]);
+               assert!(update_result.is_err());
+               if let Err(GraphSyncError::LightningError(lightning_error)) = update_result {
+                       assert_eq!(
+                               lightning_error.err,
+                               "Couldn't find previous directional data for update"
+                       );
+               } else {
+                       panic!("Unexpected update result: {:?}", update_result)
+               }
+       }
+
+       #[test]
+       fn incremental_only_update_fails_without_prior_same_direction_updates() {
+               let initialization_input = vec![
+                       76, 68, 75, 1, 111, 226, 140, 10, 182, 241, 179, 114, 193, 166, 162, 70, 174, 99, 247,
+                       79, 147, 30, 131, 101, 225, 90, 8, 156, 104, 214, 25, 0, 0, 0, 0, 0, 97, 227, 98, 218,
+                       0, 0, 0, 4, 2, 22, 7, 207, 206, 25, 164, 197, 231, 230, 231, 56, 102, 61, 250, 251,
+                       187, 172, 38, 46, 79, 247, 108, 44, 155, 48, 219, 238, 252, 53, 192, 6, 67, 2, 36, 125,
+                       157, 176, 223, 175, 234, 116, 94, 248, 201, 225, 97, 235, 50, 47, 115, 172, 63, 136,
+                       88, 216, 115, 11, 111, 217, 114, 84, 116, 124, 231, 107, 2, 158, 1, 242, 121, 152, 106,
+                       204, 131, 186, 35, 93, 70, 216, 10, 237, 224, 183, 89, 95, 65, 3, 83, 185, 58, 138,
+                       181, 64, 187, 103, 127, 68, 50, 2, 201, 19, 17, 138, 136, 149, 185, 226, 156, 137, 175,
+                       110, 32, 237, 0, 217, 90, 31, 100, 228, 149, 46, 219, 175, 168, 77, 4, 143, 38, 128,
+                       76, 97, 0, 0, 0, 2, 0, 0, 255, 8, 153, 192, 0, 2, 27, 0, 0, 0, 1, 0, 0, 255, 2, 68,
+                       226, 0, 6, 11, 0, 1, 2, 3, 0, 0, 0, 2, 0, 40, 0, 0, 0, 0, 0, 0, 3, 232, 0, 0, 3, 232,
+                       0, 0, 0, 1, 0, 0, 0, 0, 58, 85, 116, 216, 255, 8, 153, 192, 0, 2, 27, 0, 0, 25, 0, 0,
+                       0, 1, 0, 0, 0, 125, 255, 2, 68, 226, 0, 6, 11, 0, 1, 5, 0, 0, 0, 0, 29, 129, 25, 192,
+               ];
+
+               let block_hash = genesis_block(Network::Bitcoin).block_hash();
+               let network_graph = NetworkGraph::new(block_hash);
+
+               assert_eq!(network_graph.read_only().channels().len(), 0);
+
+               let initialization_result = update_network_graph(&network_graph, &initialization_input[..]);
+               if initialization_result.is_err() {
+                       panic!(
+                               "Unexpected initialization result: {:?}",
+                               initialization_result
+                       )
+               }
+
+               assert_eq!(network_graph.read_only().channels().len(), 2);
+               let initialized = network_graph.to_string();
+               assert!(initialized
+                       .contains("021607cfce19a4c5e7e6e738663dfafbbbac262e4ff76c2c9b30dbeefc35c00643"));
+               assert!(initialized
+                       .contains("02247d9db0dfafea745ef8c9e161eb322f73ac3f8858d8730b6fd97254747ce76b"));
+               assert!(initialized
+                       .contains("029e01f279986acc83ba235d46d80aede0b7595f410353b93a8ab540bb677f4432"));
+               assert!(initialized
+                       .contains("02c913118a8895b9e29c89af6e20ed00d95a1f64e4952edbafa84d048f26804c61"));
+               assert!(initialized.contains("619737530008010752"));
+               assert!(initialized.contains("783241506229452801"));
+
+               let opposite_direction_incremental_update_input = vec![
+                       76, 68, 75, 1, 111, 226, 140, 10, 182, 241, 179, 114, 193, 166, 162, 70, 174, 99, 247,
+                       79, 147, 30, 131, 101, 225, 90, 8, 156, 104, 214, 25, 0, 0, 0, 0, 0, 97, 229, 183, 167,
+                       0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+                       0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 8, 153, 192, 0, 2, 27, 0, 0, 136, 0, 0, 0, 221, 255, 2,
+                       68, 226, 0, 6, 11, 0, 1, 128,
+               ];
+               let update_result = update_network_graph(
+                       &network_graph,
+                       &opposite_direction_incremental_update_input[..],
+               );
+               assert!(update_result.is_err());
+               if let Err(GraphSyncError::LightningError(lightning_error)) = update_result {
+                       assert_eq!(
+                               lightning_error.err,
+                               "Couldn't find previous directional data for update"
+                       );
+               } else {
+                       panic!("Unexpected update result: {:?}", update_result)
+               }
+       }
+
+       #[test]
+       fn incremental_update_succeeds_with_prior_announcements_and_full_updates() {
+               let initialization_input = vec![
+                       76, 68, 75, 1, 111, 226, 140, 10, 182, 241, 179, 114, 193, 166, 162, 70, 174, 99, 247,
+                       79, 147, 30, 131, 101, 225, 90, 8, 156, 104, 214, 25, 0, 0, 0, 0, 0, 97, 227, 98, 218,
+                       0, 0, 0, 4, 2, 22, 7, 207, 206, 25, 164, 197, 231, 230, 231, 56, 102, 61, 250, 251,
+                       187, 172, 38, 46, 79, 247, 108, 44, 155, 48, 219, 238, 252, 53, 192, 6, 67, 2, 36, 125,
+                       157, 176, 223, 175, 234, 116, 94, 248, 201, 225, 97, 235, 50, 47, 115, 172, 63, 136,
+                       88, 216, 115, 11, 111, 217, 114, 84, 116, 124, 231, 107, 2, 158, 1, 242, 121, 152, 106,
+                       204, 131, 186, 35, 93, 70, 216, 10, 237, 224, 183, 89, 95, 65, 3, 83, 185, 58, 138,
+                       181, 64, 187, 103, 127, 68, 50, 2, 201, 19, 17, 138, 136, 149, 185, 226, 156, 137, 175,
+                       110, 32, 237, 0, 217, 90, 31, 100, 228, 149, 46, 219, 175, 168, 77, 4, 143, 38, 128,
+                       76, 97, 0, 0, 0, 2, 0, 0, 255, 8, 153, 192, 0, 2, 27, 0, 0, 0, 1, 0, 0, 255, 2, 68,
+                       226, 0, 6, 11, 0, 1, 2, 3, 0, 0, 0, 4, 0, 40, 0, 0, 0, 0, 0, 0, 3, 232, 0, 0, 3, 232,
+                       0, 0, 0, 1, 0, 0, 0, 0, 58, 85, 116, 216, 255, 8, 153, 192, 0, 2, 27, 0, 0, 56, 0, 0,
+                       0, 0, 0, 0, 0, 1, 0, 0, 0, 100, 0, 0, 2, 224, 0, 25, 0, 0, 0, 1, 0, 0, 0, 125, 255, 2,
+                       68, 226, 0, 6, 11, 0, 1, 4, 0, 0, 0, 0, 29, 129, 25, 192, 0, 5, 0, 0, 0, 0, 29, 129,
+                       25, 192,
+               ];
+
+               let block_hash = genesis_block(Network::Bitcoin).block_hash();
+               let network_graph = NetworkGraph::new(block_hash);
+
+               assert_eq!(network_graph.read_only().channels().len(), 0);
+
+               let initialization_result = update_network_graph(&network_graph, &initialization_input[..]);
+               assert!(initialization_result.is_ok());
+
+               let single_direction_incremental_update_input = vec![
+                       76, 68, 75, 1, 111, 226, 140, 10, 182, 241, 179, 114, 193, 166, 162, 70, 174, 99, 247,
+                       79, 147, 30, 131, 101, 225, 90, 8, 156, 104, 214, 25, 0, 0, 0, 0, 0, 97, 229, 183, 167,
+                       0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+                       0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 8, 153, 192, 0, 2, 27, 0, 0, 136, 0, 0, 0, 221, 255, 2,
+                       68, 226, 0, 6, 11, 0, 1, 128,
+               ];
+               let update_result = update_network_graph(
+                       &network_graph,
+                       &single_direction_incremental_update_input[..],
+               );
+               if update_result.is_err() {
+                       panic!("Unexpected update result: {:?}", update_result)
+               }
+
+               assert_eq!(network_graph.read_only().channels().len(), 2);
+               let after = network_graph.to_string();
+               assert!(
+                       after.contains("021607cfce19a4c5e7e6e738663dfafbbbac262e4ff76c2c9b30dbeefc35c00643")
+               );
+               assert!(
+                       after.contains("02247d9db0dfafea745ef8c9e161eb322f73ac3f8858d8730b6fd97254747ce76b")
+               );
+               assert!(
+                       after.contains("029e01f279986acc83ba235d46d80aede0b7595f410353b93a8ab540bb677f4432")
+               );
+               assert!(
+                       after.contains("02c913118a8895b9e29c89af6e20ed00d95a1f64e4952edbafa84d048f26804c61")
+               );
+               assert!(after.contains("619737530008010752"));
+               assert!(after.contains("783241506229452801"));
+       }
+
+       #[test]
+       fn full_update_succeeds() {
+               let valid_input = vec![
+                       76, 68, 75, 1, 111, 226, 140, 10, 182, 241, 179, 114, 193, 166, 162, 70, 174, 99, 247,
+                       79, 147, 30, 131, 101, 225, 90, 8, 156, 104, 214, 25, 0, 0, 0, 0, 0, 97, 227, 98, 218,
+                       0, 0, 0, 4, 2, 22, 7, 207, 206, 25, 164, 197, 231, 230, 231, 56, 102, 61, 250, 251,
+                       187, 172, 38, 46, 79, 247, 108, 44, 155, 48, 219, 238, 252, 53, 192, 6, 67, 2, 36, 125,
+                       157, 176, 223, 175, 234, 116, 94, 248, 201, 225, 97, 235, 50, 47, 115, 172, 63, 136,
+                       88, 216, 115, 11, 111, 217, 114, 84, 116, 124, 231, 107, 2, 158, 1, 242, 121, 152, 106,
+                       204, 131, 186, 35, 93, 70, 216, 10, 237, 224, 183, 89, 95, 65, 3, 83, 185, 58, 138,
+                       181, 64, 187, 103, 127, 68, 50, 2, 201, 19, 17, 138, 136, 149, 185, 226, 156, 137, 175,
+                       110, 32, 237, 0, 217, 90, 31, 100, 228, 149, 46, 219, 175, 168, 77, 4, 143, 38, 128,
+                       76, 97, 0, 0, 0, 2, 0, 0, 255, 8, 153, 192, 0, 2, 27, 0, 0, 0, 1, 0, 0, 255, 2, 68,
+                       226, 0, 6, 11, 0, 1, 2, 3, 0, 0, 0, 4, 0, 40, 0, 0, 0, 0, 0, 0, 3, 232, 0, 0, 3, 232,
+                       0, 0, 0, 1, 0, 0, 0, 0, 29, 129, 25, 192, 255, 8, 153, 192, 0, 2, 27, 0, 0, 60, 0, 0,
+                       0, 0, 0, 0, 0, 1, 0, 0, 0, 100, 0, 0, 2, 224, 0, 0, 0, 0, 58, 85, 116, 216, 0, 29, 0,
+                       0, 0, 1, 0, 0, 0, 125, 0, 0, 0, 0, 58, 85, 116, 216, 255, 2, 68, 226, 0, 6, 11, 0, 1,
+                       0, 0, 1,
+               ];
+
+               let block_hash = genesis_block(Network::Bitcoin).block_hash();
+               let network_graph = NetworkGraph::new(block_hash);
+
+               assert_eq!(network_graph.read_only().channels().len(), 0);
+
+               let update_result = update_network_graph(&network_graph, &valid_input[..]);
+               if update_result.is_err() {
+                       panic!("Unexpected update result: {:?}", update_result)
+               }
+
+               assert_eq!(network_graph.read_only().channels().len(), 2);
+               let after = network_graph.to_string();
+               assert!(
+                       after.contains("021607cfce19a4c5e7e6e738663dfafbbbac262e4ff76c2c9b30dbeefc35c00643")
+               );
+               assert!(
+                       after.contains("02247d9db0dfafea745ef8c9e161eb322f73ac3f8858d8730b6fd97254747ce76b")
+               );
+               assert!(
+                       after.contains("029e01f279986acc83ba235d46d80aede0b7595f410353b93a8ab540bb677f4432")
+               );
+               assert!(
+                       after.contains("02c913118a8895b9e29c89af6e20ed00d95a1f64e4952edbafa84d048f26804c61")
+               );
+               assert!(after.contains("619737530008010752"));
+               assert!(after.contains("783241506229452801"));
+       }
+}
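
The tests above drive the snapshot parser directly against hard-coded byte vectors. As a rough usage sketch (assuming the helper is publicly reachable at `lightning_rapid_gossip_sync::processing::update_network_graph` and that the snapshot bytes come from a file on disk, neither of which is shown in this diff), applying a snapshot to a fresh graph could look like:

use bitcoin::blockdata::constants::genesis_block;
use bitcoin::network::constants::Network;
use lightning::routing::network_graph::NetworkGraph;
use lightning_rapid_gossip_sync::processing::update_network_graph; // assumed path

fn main() {
    // Hypothetical snapshot file in the binary format parsed by the tests above.
    let snapshot_bytes = std::fs::read("rapid_gossip_sync_snapshot.bin").unwrap();

    let block_hash = genesis_block(Network::Bitcoin).block_hash();
    let network_graph = NetworkGraph::new(block_hash);

    // A malformed or out-of-order snapshot yields a GraphSyncError, as exercised above.
    match update_network_graph(&network_graph, &snapshot_bytes[..]) {
        Ok(_) => println!("graph now tracks {} channels", network_graph.read_only().channels().len()),
        Err(e) => eprintln!("snapshot rejected: {:?}", e),
    }
}
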
index f0ee1c85937bd904862caa5845d93809653bc316..5128600163a42854f0be6c2bb5f27f79608050e6 100644 (file)
@@ -166,11 +166,11 @@ pub struct HTLCUpdate {
        pub(crate) payment_hash: PaymentHash,
        pub(crate) payment_preimage: Option<PaymentPreimage>,
        pub(crate) source: HTLCSource,
-       pub(crate) onchain_value_satoshis: Option<u64>,
+       pub(crate) htlc_value_satoshis: Option<u64>,
 }
 impl_writeable_tlv_based!(HTLCUpdate, {
        (0, payment_hash, required),
-       (1, onchain_value_satoshis, option),
+       (1, htlc_value_satoshis, option),
        (2, source, required),
        (4, payment_preimage, option),
 });
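
Note the field rename above is serialization-compatible: `impl_writeable_tlv_based!` keys each field by its TLV type number (0, 1, 2, 4), not by its Rust name, and the odd type 1 marks the value as one readers may skip. A hypothetical in-crate struct using the macro the same way (names here are illustrative only, not part of this change):

// Hypothetical example: what goes on the wire is keyed by the TLV type numbers, so
// renaming `value_sats` later would not break compatibility, while its odd type (1)
// lets readers which don't know the field ignore it.
struct ExampleRecord {
    output_idx: u32,
    value_sats: Option<u64>,
}
impl_writeable_tlv_based!(ExampleRecord, {
    (0, output_idx, required),
    (1, value_sats, option),
});
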
@@ -357,10 +357,10 @@ enum OnchainEvent {
        HTLCUpdate {
                source: HTLCSource,
                payment_hash: PaymentHash,
-               onchain_value_satoshis: Option<u64>,
+               htlc_value_satoshis: Option<u64>,
                /// None in the second case, above, ie when there is no relevant output in the commitment
                /// transaction which appeared on chain.
-               input_idx: Option<u32>,
+               commitment_tx_output_idx: Option<u32>,
        },
        MaturingOutput {
                descriptor: SpendableOutputDescriptor,
@@ -381,7 +381,7 @@ enum OnchainEvent {
        ///  * a revoked-state HTLC transaction was broadcasted, which was claimed by the revocation
        ///    signature.
        HTLCSpendConfirmation {
-               input_idx: u32,
+               commitment_tx_output_idx: u32,
                /// If the claim was made by either party with a preimage, this is filled in
                preimage: Option<PaymentPreimage>,
                /// If the claim was made by us on an inbound HTLC against a local commitment transaction,
@@ -423,9 +423,9 @@ impl MaybeReadable for OnchainEventEntry {
 impl_writeable_tlv_based_enum_upgradable!(OnchainEvent,
        (0, HTLCUpdate) => {
                (0, source, required),
-               (1, onchain_value_satoshis, option),
+               (1, htlc_value_satoshis, option),
                (2, payment_hash, required),
-               (3, input_idx, option),
+               (3, commitment_tx_output_idx, option),
        },
        (1, MaturingOutput) => {
                (0, descriptor, required),
@@ -434,7 +434,7 @@ impl_writeable_tlv_based_enum_upgradable!(OnchainEvent,
                (0, on_local_output_csv, option),
        },
        (5, HTLCSpendConfirmation) => {
-               (0, input_idx, required),
+               (0, commitment_tx_output_idx, required),
                (2, preimage, option),
                (4, on_to_local_output_csv, option),
        },
@@ -452,7 +452,7 @@ pub(crate) enum ChannelMonitorUpdateStep {
                commitment_txid: Txid,
                htlc_outputs: Vec<(HTLCOutputInCommitment, Option<Box<HTLCSource>>)>,
                commitment_number: u64,
-               their_revocation_point: PublicKey,
+               their_per_commitment_point: PublicKey,
        },
        PaymentPreimage {
                payment_preimage: PaymentPreimage,
@@ -494,7 +494,7 @@ impl_writeable_tlv_based_enum_upgradable!(ChannelMonitorUpdateStep,
        (1, LatestCounterpartyCommitmentTXInfo) => {
                (0, commitment_txid, required),
                (2, commitment_number, required),
-               (4, their_revocation_point, required),
+               (4, their_per_commitment_point, required),
                (6, htlc_outputs, vec_type),
        },
        (2, PaymentPreimage) => {
@@ -568,13 +568,13 @@ pub enum Balance {
 /// An HTLC which has been irrevocably resolved on-chain, and has reached ANTI_REORG_DELAY.
 #[derive(PartialEq)]
 struct IrrevocablyResolvedHTLC {
-       input_idx: u32,
+       commitment_tx_output_idx: u32,
        /// Only set if the HTLC claim was ours using a payment preimage
        payment_preimage: Option<PaymentPreimage>,
 }
 
 impl_writeable_tlv_based!(IrrevocablyResolvedHTLC, {
-       (0, input_idx, required),
+       (0, commitment_tx_output_idx, required),
        (2, payment_preimage, option),
 });
 
@@ -619,8 +619,8 @@ pub(crate) struct ChannelMonitorImpl<Signer: Sign> {
        counterparty_commitment_params: CounterpartyCommitmentParameters,
        funding_redeemscript: Script,
        channel_value_satoshis: u64,
-       // first is the idx of the first of the two revocation points
-       their_cur_revocation_points: Option<(u64, PublicKey, Option<PublicKey>)>,
+       // first is the idx of the first of the two per-commitment points
+       their_cur_per_commitment_points: Option<(u64, PublicKey, Option<PublicKey>)>,
 
        on_holder_tx_csv: u16,
 
@@ -757,7 +757,7 @@ impl<Signer: Sign> PartialEq for ChannelMonitorImpl<Signer> {
                        self.counterparty_commitment_params != other.counterparty_commitment_params ||
                        self.funding_redeemscript != other.funding_redeemscript ||
                        self.channel_value_satoshis != other.channel_value_satoshis ||
-                       self.their_cur_revocation_points != other.their_cur_revocation_points ||
+                       self.their_cur_per_commitment_points != other.their_cur_per_commitment_points ||
                        self.on_holder_tx_csv != other.on_holder_tx_csv ||
                        self.commitment_secrets != other.commitment_secrets ||
                        self.counterparty_claimable_outpoints != other.counterparty_claimable_outpoints ||
@@ -832,7 +832,7 @@ impl<Signer: Sign> Writeable for ChannelMonitorImpl<Signer> {
                self.funding_redeemscript.write(writer)?;
                self.channel_value_satoshis.write(writer)?;
 
-               match self.their_cur_revocation_points {
+               match self.their_cur_per_commitment_points {
                        Some((idx, pubkey, second_option)) => {
                                writer.write_all(&byte_utils::be48_to_array(idx))?;
                                writer.write_all(&pubkey.serialize())?;
@@ -1024,7 +1024,7 @@ impl<Signer: Sign> ChannelMonitor<Signer> {
                                counterparty_commitment_params,
                                funding_redeemscript,
                                channel_value_satoshis,
-                               their_cur_revocation_points: None,
+                               their_cur_per_commitment_points: None,
 
                                on_holder_tx_csv: counterparty_channel_parameters.selected_contest_delay,
 
@@ -1074,11 +1074,11 @@ impl<Signer: Sign> ChannelMonitor<Signer> {
                txid: Txid,
                htlc_outputs: Vec<(HTLCOutputInCommitment, Option<Box<HTLCSource>>)>,
                commitment_number: u64,
-               their_revocation_point: PublicKey,
+               their_per_commitment_point: PublicKey,
                logger: &L,
        ) where L::Target: Logger {
                self.inner.lock().unwrap().provide_latest_counterparty_commitment_tx(
-                       txid, htlc_outputs, commitment_number, their_revocation_point, logger)
+                       txid, htlc_outputs, commitment_number, their_per_commitment_point, logger)
        }
 
        #[cfg(test)]
@@ -1396,10 +1396,10 @@ impl<Signer: Sign> ChannelMonitor<Signer> {
                macro_rules! walk_htlcs {
                        ($holder_commitment: expr, $htlc_iter: expr) => {
                                for htlc in $htlc_iter {
-                                       if let Some(htlc_input_idx) = htlc.transaction_output_index {
+                                       if let Some(htlc_commitment_tx_output_idx) = htlc.transaction_output_index {
                                                if let Some(conf_thresh) = us.onchain_events_awaiting_threshold_conf.iter().find_map(|event| {
                                                        if let OnchainEvent::MaturingOutput { descriptor: SpendableOutputDescriptor::DelayedPaymentOutput(descriptor) } = &event.event {
-                                                               if descriptor.outpoint.index as u32 == htlc_input_idx { Some(event.confirmation_threshold()) } else { None }
+                                                               if descriptor.outpoint.index as u32 == htlc_commitment_tx_output_idx { Some(event.confirmation_threshold()) } else { None }
                                                        } else { None }
                                                }) {
                                                        debug_assert!($holder_commitment);
@@ -1407,7 +1407,7 @@ impl<Signer: Sign> ChannelMonitor<Signer> {
                                                                claimable_amount_satoshis: htlc.amount_msat / 1000,
                                                                confirmation_height: conf_thresh,
                                                        });
-                                               } else if us.htlcs_resolved_on_chain.iter().any(|v| v.input_idx == htlc_input_idx) {
+                                               } else if us.htlcs_resolved_on_chain.iter().any(|v| v.commitment_tx_output_idx == htlc_commitment_tx_output_idx) {
                                                        // Funding transaction spends should be fully confirmed by the time any
                                                        // HTLC transactions are resolved, unless we're talking about a holder
                                                        // commitment tx, whose resolution is delayed until the CSV timeout is
@@ -1419,8 +1419,9 @@ impl<Signer: Sign> ChannelMonitor<Signer> {
                                                        // indicating we have spent this HTLC with a timeout, claiming it back
                                                        // and awaiting confirmations on it.
                                                        let htlc_update_pending = us.onchain_events_awaiting_threshold_conf.iter().find_map(|event| {
-                                                               if let OnchainEvent::HTLCUpdate { input_idx: Some(input_idx), .. } = event.event {
-                                                                       if input_idx == htlc_input_idx { Some(event.confirmation_threshold()) } else { None }
+                                                               if let OnchainEvent::HTLCUpdate { commitment_tx_output_idx: Some(commitment_tx_output_idx), .. } = event.event {
+                                                                       if commitment_tx_output_idx == htlc_commitment_tx_output_idx {
+                                                                               Some(event.confirmation_threshold()) } else { None }
                                                                } else { None }
                                                        });
                                                        if let Some(conf_thresh) = htlc_update_pending {
@@ -1441,8 +1442,8 @@ impl<Signer: Sign> ChannelMonitor<Signer> {
                                                        // preimage, we lost funds to our counterparty! We will then continue
                                                        // to show it as ContentiousClaimable until ANTI_REORG_DELAY.
                                                        let htlc_spend_pending = us.onchain_events_awaiting_threshold_conf.iter().find_map(|event| {
-                                                               if let OnchainEvent::HTLCSpendConfirmation { input_idx, preimage, .. } = event.event {
-                                                                       if input_idx == htlc_input_idx {
+                                                               if let OnchainEvent::HTLCSpendConfirmation { commitment_tx_output_idx, preimage, .. } = event.event {
+                                                                       if commitment_tx_output_idx == htlc_commitment_tx_output_idx {
                                                                                Some((event.confirmation_threshold(), preimage.is_some()))
                                                                        } else { None }
                                                                } else { None }
@@ -1551,7 +1552,7 @@ impl<Signer: Sign> ChannelMonitor<Signer> {
                macro_rules! walk_htlcs {
                        ($holder_commitment: expr, $htlc_iter: expr) => {
                                for (htlc, source) in $htlc_iter {
-                                       if us.htlcs_resolved_on_chain.iter().any(|v| Some(v.input_idx) == htlc.transaction_output_index) {
+                                       if us.htlcs_resolved_on_chain.iter().any(|v| Some(v.commitment_tx_output_idx) == htlc.transaction_output_index) {
                                                // We should assert that funding_spend_confirmed is_some() here, but we
                                                // have some unit tests which violate HTLC transaction CSVs entirely and
                                                // would fail.
@@ -1562,17 +1563,17 @@ impl<Signer: Sign> ChannelMonitor<Signer> {
                                                // indicating we have spent this HTLC with a timeout, claiming it back
                                                // and awaiting confirmations on it.
                                                let htlc_update_confd = us.onchain_events_awaiting_threshold_conf.iter().any(|event| {
-                                                       if let OnchainEvent::HTLCUpdate { input_idx: Some(input_idx), .. } = event.event {
+                                                       if let OnchainEvent::HTLCUpdate { commitment_tx_output_idx: Some(commitment_tx_output_idx), .. } = event.event {
                                                                // If the HTLC was timed out, we wait for ANTI_REORG_DELAY blocks
                                                                // before considering it "no longer pending" - this matches when we
                                                                // provide the ChannelManager an HTLC failure event.
-                                                               Some(input_idx) == htlc.transaction_output_index &&
+                                                               Some(commitment_tx_output_idx) == htlc.transaction_output_index &&
                                                                        us.best_block.height() >= event.height + ANTI_REORG_DELAY - 1
-                                                       } else if let OnchainEvent::HTLCSpendConfirmation { input_idx, .. } = event.event {
+                                                       } else if let OnchainEvent::HTLCSpendConfirmation { commitment_tx_output_idx, .. } = event.event {
                                                                // If the HTLC was fulfilled with a preimage, we consider the HTLC
                                                                // immediately non-pending, matching when we provide ChannelManager
                                                                // the preimage.
-                                                               Some(input_idx) == htlc.transaction_output_index
+                                                               Some(commitment_tx_output_idx) == htlc.transaction_output_index
                                                        } else { false }
                                                });
                                                if !htlc_update_confd {
@@ -1697,8 +1698,8 @@ macro_rules! fail_unbroadcast_htlcs {
                                                                event: OnchainEvent::HTLCUpdate {
                                                                        source: (**source).clone(),
                                                                        payment_hash: htlc.payment_hash.clone(),
-                                                                       onchain_value_satoshis: Some(htlc.amount_msat / 1000),
-                                                                       input_idx: None,
+                                                                       htlc_value_satoshis: Some(htlc.amount_msat / 1000),
+                                                                       commitment_tx_output_idx: None,
                                                                },
                                                        };
                                                        log_trace!($logger, "Failing HTLC with payment_hash {} from {} counterparty commitment tx due to broadcast of {} commitment transaction, waiting for confirmation (at height {})",
@@ -1770,7 +1771,7 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
                Ok(())
        }
 
-       pub(crate) fn provide_latest_counterparty_commitment_tx<L: Deref>(&mut self, txid: Txid, htlc_outputs: Vec<(HTLCOutputInCommitment, Option<Box<HTLCSource>>)>, commitment_number: u64, their_revocation_point: PublicKey, logger: &L) where L::Target: Logger {
+       pub(crate) fn provide_latest_counterparty_commitment_tx<L: Deref>(&mut self, txid: Txid, htlc_outputs: Vec<(HTLCOutputInCommitment, Option<Box<HTLCSource>>)>, commitment_number: u64, their_per_commitment_point: PublicKey, logger: &L) where L::Target: Logger {
                // TODO: Encrypt the htlc_outputs data with the single-hash of the commitment transaction
                // so that a remote monitor doesn't learn anything unless there is a malicious close.
                // (only maybe, sadly we cant do the same for local info, as we need to be aware of
@@ -1785,22 +1786,22 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
                self.counterparty_claimable_outpoints.insert(txid, htlc_outputs.clone());
                self.current_counterparty_commitment_number = commitment_number;
                //TODO: Merge this into the other per-counterparty-transaction output storage stuff
-               match self.their_cur_revocation_points {
+               match self.their_cur_per_commitment_points {
                        Some(old_points) => {
                                if old_points.0 == commitment_number + 1 {
-                                       self.their_cur_revocation_points = Some((old_points.0, old_points.1, Some(their_revocation_point)));
+                                       self.their_cur_per_commitment_points = Some((old_points.0, old_points.1, Some(their_per_commitment_point)));
                                } else if old_points.0 == commitment_number + 2 {
                                        if let Some(old_second_point) = old_points.2 {
-                                               self.their_cur_revocation_points = Some((old_points.0 - 1, old_second_point, Some(their_revocation_point)));
+                                               self.their_cur_per_commitment_points = Some((old_points.0 - 1, old_second_point, Some(their_per_commitment_point)));
                                        } else {
-                                               self.their_cur_revocation_points = Some((commitment_number, their_revocation_point, None));
+                                               self.their_cur_per_commitment_points = Some((commitment_number, their_per_commitment_point, None));
                                        }
                                } else {
-                                       self.their_cur_revocation_points = Some((commitment_number, their_revocation_point, None));
+                                       self.their_cur_per_commitment_points = Some((commitment_number, their_per_commitment_point, None));
                                }
                        },
                        None => {
-                               self.their_cur_revocation_points = Some((commitment_number, their_revocation_point, None));
+                               self.their_cur_per_commitment_points = Some((commitment_number, their_per_commitment_point, None));
                        }
                }
                let mut htlcs = Vec::with_capacity(htlc_outputs.len());
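
The bookkeeping above keeps at most the two most recent counterparty per-commitment points, stored as (idx, point for idx, point for idx - 1), with commitment numbers counting down as new states are exchanged. A standalone sketch of the same update rule, using `char` stand-ins for `PublicKey`s and small toy numbers:

// Mirrors the match on `their_cur_per_commitment_points`: the tuple is
// (idx, point_for_idx, point_for_idx_minus_one).
fn track_point(cur: &mut Option<(u64, char, Option<char>)>, commitment_number: u64, point: char) {
    *cur = match *cur {
        // New number is idx - 1: fill the second slot.
        Some((idx, first, _)) if idx == commitment_number + 1 => Some((idx, first, Some(point))),
        // New number is idx - 2 and both slots are known: slide the window down by one.
        Some((idx, _, Some(second))) if idx == commitment_number + 2 => Some((idx - 1, second, Some(point))),
        // First point seen (or an unexpected gap): start a fresh window.
        _ => Some((commitment_number, point, None)),
    };
}

fn main() {
    let mut points = None;
    track_point(&mut points, 100, 'A'); // Some((100, 'A', None))
    track_point(&mut points, 99, 'B');  // Some((100, 'A', Some('B')))
    track_point(&mut points, 98, 'C');  // Some((99, 'B', Some('C')))
    assert_eq!(points, Some((99, 'B', Some('C'))));
}
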
@@ -1938,9 +1939,9 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
                                                ret = Err(());
                                        }
                                }
-                               ChannelMonitorUpdateStep::LatestCounterpartyCommitmentTXInfo { commitment_txid, htlc_outputs, commitment_number, their_revocation_point } => {
+                               ChannelMonitorUpdateStep::LatestCounterpartyCommitmentTXInfo { commitment_txid, htlc_outputs, commitment_number, their_per_commitment_point } => {
                                        log_trace!(logger, "Updating ChannelMonitor with latest counterparty commitment transaction info");
-                                       self.provide_latest_counterparty_commitment_tx(*commitment_txid, htlc_outputs.clone(), *commitment_number, *their_revocation_point, logger)
+                                       self.provide_latest_counterparty_commitment_tx(*commitment_txid, htlc_outputs.clone(), *commitment_number, *their_per_commitment_point, logger)
                                },
                                ChannelMonitorUpdateStep::PaymentPreimage { payment_preimage } => {
                                        log_trace!(logger, "Updating ChannelMonitor with payment preimage");
@@ -2129,18 +2130,18 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
        fn get_counterparty_htlc_output_claim_reqs(&self, commitment_number: u64, commitment_txid: Txid, tx: Option<&Transaction>) -> Vec<PackageTemplate> {
                let mut claimable_outpoints = Vec::new();
                if let Some(htlc_outputs) = self.counterparty_claimable_outpoints.get(&commitment_txid) {
-                       if let Some(revocation_points) = self.their_cur_revocation_points {
-                               let revocation_point_option =
+                       if let Some(per_commitment_points) = self.their_cur_per_commitment_points {
+                               let per_commitment_point_option =
                                        // If the counterparty commitment tx is the latest valid state, use their latest
                                        // per-commitment point
-                                       if revocation_points.0 == commitment_number { Some(&revocation_points.1) }
-                                       else if let Some(point) = revocation_points.2.as_ref() {
+                                       if per_commitment_points.0 == commitment_number { Some(&per_commitment_points.1) }
+                                       else if let Some(point) = per_commitment_points.2.as_ref() {
                                                // If counterparty commitment tx is the state previous to the latest valid state, use
                                                // their previous per-commitment point (non-atomicity of revocation means it's valid for
                                                // them to temporarily have two valid commitment txns from our viewpoint)
-                                               if revocation_points.0 == commitment_number + 1 { Some(point) } else { None }
+                                               if per_commitment_points.0 == commitment_number + 1 { Some(point) } else { None }
                                        } else { None };
-                               if let Some(revocation_point) = revocation_point_option {
+                               if let Some(per_commitment_point) = per_commitment_point_option {
                                        for (_, &(ref htlc, _)) in htlc_outputs.iter().enumerate() {
                                                if let Some(transaction_output_index) = htlc.transaction_output_index {
                                                        if let Some(transaction) = tx {
@@ -2151,7 +2152,19 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
                                                        }
                                                        let preimage = if htlc.offered { if let Some(p) = self.payment_preimages.get(&htlc.payment_hash) { Some(*p) } else { None } } else { None };
                                                        if preimage.is_some() || !htlc.offered {
-                                                               let counterparty_htlc_outp = if htlc.offered { PackageSolvingData::CounterpartyOfferedHTLCOutput(CounterpartyOfferedHTLCOutput::build(*revocation_point, self.counterparty_commitment_params.counterparty_delayed_payment_base_key, self.counterparty_commitment_params.counterparty_htlc_base_key, preimage.unwrap(), htlc.clone())) } else { PackageSolvingData::CounterpartyReceivedHTLCOutput(CounterpartyReceivedHTLCOutput::build(*revocation_point, self.counterparty_commitment_params.counterparty_delayed_payment_base_key, self.counterparty_commitment_params.counterparty_htlc_base_key, htlc.clone())) };
+                                                               let counterparty_htlc_outp = if htlc.offered {
+                                                                       PackageSolvingData::CounterpartyOfferedHTLCOutput(
+                                                                               CounterpartyOfferedHTLCOutput::build(*per_commitment_point,
+                                                                                       self.counterparty_commitment_params.counterparty_delayed_payment_base_key,
+                                                                                       self.counterparty_commitment_params.counterparty_htlc_base_key,
+                                                                                       preimage.unwrap(), htlc.clone()))
+                                                               } else {
+                                                                       PackageSolvingData::CounterpartyReceivedHTLCOutput(
+                                                                               CounterpartyReceivedHTLCOutput::build(*per_commitment_point,
+                                                                                       self.counterparty_commitment_params.counterparty_delayed_payment_base_key,
+                                                                                       self.counterparty_commitment_params.counterparty_htlc_base_key,
+                                                                                       htlc.clone()))
+                                                               };
                                                                let aggregation = if !htlc.offered { false } else { true };
                                                                let counterparty_package = PackageTemplate::build_package(commitment_txid, transaction_output_index, counterparty_htlc_outp, htlc.cltv_expiry,aggregation, 0);
                                                                claimable_outpoints.push(counterparty_package);
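
For the claim path above, the confirmed counterparty commitment number is matched against that same two-slot window; in the toy representation from the previous sketch, the selection rule is:

// Return the tracked point for a confirmed counterparty commitment number, if any:
// slot .1 when the number equals the stored idx, slot .2 when it is idx - 1.
fn select_point(points: &(u64, char, Option<char>), commitment_number: u64) -> Option<char> {
    if points.0 == commitment_number {
        Some(points.1)
    } else if points.0 == commitment_number + 1 {
        points.2
    } else {
        None
    }
}

fn main() {
    let tracked = (99u64, 'B', Some('C'));
    assert_eq!(select_point(&tracked, 99), Some('B'));
    assert_eq!(select_point(&tracked, 98), Some('C'));
    // Anything outside the two tracked states cannot be claimed this way.
    assert_eq!(select_point(&tracked, 97), None);
}
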
@@ -2531,7 +2544,7 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
                // Produce actionable events from on-chain events having reached their threshold.
                for entry in onchain_events_reaching_threshold_conf.drain(..) {
                        match entry.event {
-                               OnchainEvent::HTLCUpdate { ref source, payment_hash, onchain_value_satoshis, input_idx } => {
+                               OnchainEvent::HTLCUpdate { ref source, payment_hash, htlc_value_satoshis, commitment_tx_output_idx } => {
                                        // Check for duplicate HTLC resolutions.
                                        #[cfg(debug_assertions)]
                                        {
@@ -2553,10 +2566,10 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
                                                payment_hash,
                                                payment_preimage: None,
                                                source: source.clone(),
-                                               onchain_value_satoshis,
+                                               htlc_value_satoshis,
                                        }));
-                                       if let Some(idx) = input_idx {
-                                               self.htlcs_resolved_on_chain.push(IrrevocablyResolvedHTLC { input_idx: idx, payment_preimage: None });
+                                       if let Some(idx) = commitment_tx_output_idx {
+                                               self.htlcs_resolved_on_chain.push(IrrevocablyResolvedHTLC { commitment_tx_output_idx: idx, payment_preimage: None });
                                        }
                                },
                                OnchainEvent::MaturingOutput { descriptor } => {
@@ -2565,8 +2578,8 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
                                                outputs: vec![descriptor]
                                        });
                                },
-                               OnchainEvent::HTLCSpendConfirmation { input_idx, preimage, .. } => {
-                                       self.htlcs_resolved_on_chain.push(IrrevocablyResolvedHTLC { input_idx, payment_preimage: preimage });
+                               OnchainEvent::HTLCSpendConfirmation { commitment_tx_output_idx, preimage, .. } => {
+                                       self.htlcs_resolved_on_chain.push(IrrevocablyResolvedHTLC { commitment_tx_output_idx, payment_preimage: preimage });
                                },
                                OnchainEvent::FundingSpendConfirmation { .. } => {
                                        self.funding_spend_confirmed = Some(entry.txid);
@@ -2835,7 +2848,7 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
                                                                        self.onchain_events_awaiting_threshold_conf.push(OnchainEventEntry {
                                                                                txid: tx.txid(), height,
                                                                                event: OnchainEvent::HTLCSpendConfirmation {
-                                                                                       input_idx: input.previous_output.vout,
+                                                                                       commitment_tx_output_idx: input.previous_output.vout,
                                                                                        preimage: if accepted_preimage_claim || offered_preimage_claim {
                                                                                                Some(payment_preimage) } else { None },
                                                                                        // If this is a payment to us (!outbound_htlc, above),
@@ -2886,7 +2899,7 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
                                                        txid: tx.txid(),
                                                        height,
                                                        event: OnchainEvent::HTLCSpendConfirmation {
-                                                               input_idx: input.previous_output.vout,
+                                                               commitment_tx_output_idx: input.previous_output.vout,
                                                                preimage: Some(payment_preimage),
                                                                on_to_local_output_csv: None,
                                                        },
@@ -2895,7 +2908,7 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
                                                        source,
                                                        payment_preimage: Some(payment_preimage),
                                                        payment_hash,
-                                                       onchain_value_satoshis: Some(amount_msat / 1000),
+                                                       htlc_value_satoshis: Some(amount_msat / 1000),
                                                }));
                                        }
                                } else if offered_preimage_claim {
@@ -2907,7 +2920,7 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
                                                        txid: tx.txid(),
                                                        height,
                                                        event: OnchainEvent::HTLCSpendConfirmation {
-                                                               input_idx: input.previous_output.vout,
+                                                               commitment_tx_output_idx: input.previous_output.vout,
                                                                preimage: Some(payment_preimage),
                                                                on_to_local_output_csv: None,
                                                        },
@@ -2916,7 +2929,7 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
                                                        source,
                                                        payment_preimage: Some(payment_preimage),
                                                        payment_hash,
-                                                       onchain_value_satoshis: Some(amount_msat / 1000),
+                                                       htlc_value_satoshis: Some(amount_msat / 1000),
                                                }));
                                        }
                                } else {
@@ -2934,8 +2947,8 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
                                                height,
                                                event: OnchainEvent::HTLCUpdate {
                                                        source, payment_hash,
-                                                       onchain_value_satoshis: Some(amount_msat / 1000),
-                                                       input_idx: Some(input.previous_output.vout),
+                                                       htlc_value_satoshis: Some(amount_msat / 1000),
+                                                       commitment_tx_output_idx: Some(input.previous_output.vout),
                                                },
                                        };
                                        log_info!(logger, "Failing HTLC with payment_hash {} timeout by a spend tx, waiting for confirmation (at height {})", log_bytes!(payment_hash.0), entry.confirmation_threshold());
@@ -3103,7 +3116,7 @@ impl<'a, Signer: Sign, K: KeysInterface<Signer = Signer>> ReadableArgs<&'a K>
                let funding_redeemscript = Readable::read(reader)?;
                let channel_value_satoshis = Readable::read(reader)?;
 
-               let their_cur_revocation_points = {
+               let their_cur_per_commitment_points = {
                        let first_idx = <U48 as Readable>::read(reader)?.0;
                        if first_idx == 0 {
                                None
@@ -3292,7 +3305,7 @@ impl<'a, Signer: Sign, K: KeysInterface<Signer = Signer>> ReadableArgs<&'a K>
                                counterparty_commitment_params,
                                funding_redeemscript,
                                channel_value_satoshis,
-                               their_cur_revocation_points,
+                               their_cur_per_commitment_points,
 
                                on_holder_tx_csv,
 
index 48aa712f39c1d55263f7cd5129e0b3cce3690d41..5959508de3b728e29b13d326b76c066d665c48bd 100644 (file)
@@ -76,7 +76,7 @@ pub trait Access {
        /// Returns an error if `genesis_hash` is for a different chain or if such a transaction output
        /// is unknown.
        ///
-       /// [`short_channel_id`]: https://github.com/lightningnetwork/lightning-rfc/blob/master/07-routing-gossip.md#definition-of-short_channel_id
+       /// [`short_channel_id`]: https://github.com/lightning/bolts/blob/master/07-routing-gossip.md#definition-of-short_channel_id
        fn get_utxo(&self, genesis_hash: &BlockHash, short_channel_id: u64) -> Result<TxOut, AccessError>;
 }
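
As a minimal sketch of an `Access` implementation (the struct, field names, and in-memory map are illustrative assumptions, not LDK API; the SCID decomposition follows BOLT 7's block-height / tx-index / output-index packing):

use std::collections::HashMap;
use bitcoin::hash_types::BlockHash;
use bitcoin::blockdata::transaction::TxOut;
use lightning::chain::{Access, AccessError};

/// Hypothetical resolver answering UTXO lookups from a prefilled map.
struct MapUtxoResolver {
    genesis_hash: BlockHash,
    // Keyed by (block height, tx index, output index) as packed into the SCID.
    utxos: HashMap<(u32, u32, u16), TxOut>,
}

impl Access for MapUtxoResolver {
    fn get_utxo(&self, genesis_hash: &BlockHash, short_channel_id: u64) -> Result<TxOut, AccessError> {
        if *genesis_hash != self.genesis_hash {
            return Err(AccessError::UnknownChain);
        }
        let block_height = (short_channel_id >> 40) as u32;
        let tx_index = ((short_channel_id >> 16) & 0xff_ffff) as u32;
        let output_index = (short_channel_id & 0xffff) as u16;
        self.utxos.get(&(block_height, tx_index, output_index))
            .cloned().ok_or(AccessError::UnknownTx)
    }
}
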
 
index abdc10c577a4f476b70929dad51499b26f1b0cfe..3f88a208e9d00da1976fafd6e42fbbf6c6ace030 100644 (file)
 //! figure out how best to make networking happen/timers fire/things get written to disk/keys get
 //! generated/etc. This makes it a good candidate for tight integration into an existing wallet
 //! instead of having a rather-separate lightning appendage to a wallet.
+//!
+//! `default` features are:
+//!
+//! * `std` - enables functionality which requires `std`, including `std::io` trait implementations and features which rely on time
+//! * `grind_signatures` - enables generation of [low-r bitcoin signatures](https://bitcoin.stackexchange.com/questions/111660/what-is-signature-grinding),
+//! which saves 1 byte per signature in 50% of the cases (see [bitcoin PR #13666](https://github.com/bitcoin/bitcoin/pull/13666))
+//!
+//! Available features are:
+//!
+//! * `std`
+//! * `grind_signatures`
+//! * `no-std` - exposes write trait implementations from the `core2` crate (at least one of `no-std` or `std` is required)
+//! * Skip logging of messages at levels below the given log level:
+//!     * `max_level_off`
+//!     * `max_level_error`
+//!     * `max_level_warn`
+//!     * `max_level_info`
+//!     * `max_level_debug`
+//!     * `max_level_trace`
 
 #![cfg_attr(not(any(test, fuzzing, feature = "_test_utils")), deny(missing_docs))]
 #![cfg_attr(not(any(test, fuzzing, feature = "_test_utils")), forbid(unsafe_code))]
@@ -158,7 +177,7 @@ mod sync {
        #[cfg(test)]
        pub use debug_sync::*;
        #[cfg(not(test))]
-       pub use ::std::sync::{Arc, Mutex, Condvar, MutexGuard, RwLock, RwLockReadGuard};
+       pub use ::std::sync::{Arc, Mutex, Condvar, MutexGuard, RwLock, RwLockReadGuard, RwLockWriteGuard};
        #[cfg(not(test))]
        pub use crate::util::fairrwlock::FairRwLock;
 }
index 9e987c3deec005debbdb81fd7557a6be75e955f1..41d1eff856a71ed88cf8f43c14d8ffe234d27447 100644 (file)
@@ -139,7 +139,7 @@ pub fn build_closing_transaction(to_holder_value_sat: u64, to_counterparty_value
 }
 
 /// Implements the per-commitment secret storage scheme from
-/// [BOLT 3](https://github.com/lightningnetwork/lightning-rfc/blob/dcbf8583976df087c79c3ce0b535311212e6812d/03-transactions.md#efficient-per-commitment-secret-storage).
+/// [BOLT 3](https://github.com/lightning/bolts/blob/dcbf8583976df087c79c3ce0b535311212e6812d/03-transactions.md#efficient-per-commitment-secret-storage).
 ///
 /// Allows us to keep track of all of the revocation secrets of our counterparty in just 50*32 bytes
 /// or so.
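
The scheme works because any secret can be derived from a stored ancestor by flipping the remaining set bits of its index (high to low) and hashing. LDK has its own implementation alongside this struct; the snippet below is only a standalone illustration of that BOLT 3 derivation rule:

use bitcoin::hashes::{sha256, Hash};

// Derive the secret for `index` from a secret whose index shares the high bits and has
// the low `bits` bits zeroed; deriving from the seed itself is the `bits = 48` case.
fn derive_from(base: [u8; 32], bits: u8, index: u64) -> [u8; 32] {
    let mut secret = base;
    for b in (0..bits).rev() {
        if (index >> b) & 1 == 1 {
            // Flip bit (b mod 8) of byte (b div 8), then hash, per BOLT 3.
            secret[(b / 8) as usize] ^= 1u8 << (b % 8);
            secret = sha256::Hash::hash(&secret).into_inner();
        }
    }
    secret
}
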
index 5d0ef759cf773f5f7c9da9b7a09e973fde23f86a..fe80d350cd7de4010058453c6bd31fef543b6e3f 100644 (file)
@@ -103,7 +103,7 @@ fn test_monitor_and_persister_update_fail() {
                // Because we will connect a block at height 200 below, we need the TestBroadcaster to know
                // that we are at height 200 so that it doesn't think we're violating the time lock
                // requirements of transactions broadcasted at that point.
-               blocks: Arc::new(Mutex::new(vec![(genesis_block(Network::Testnet).header, 200); 200])),
+               blocks: Arc::new(Mutex::new(vec![(genesis_block(Network::Testnet), 200); 200])),
        };
        let chain_mon = {
                let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap();
index cde1e8a13bd0d678ef0d54da0fcf4458f2b482ae..01cd6890a9f58100a54e0fe11d790822521cfa2a 100644 (file)
@@ -126,7 +126,7 @@ enum InboundHTLCState {
        /// signatures in a commitment_signed message.
        /// Implies AwaitingRemoteRevoke.
        ///
-       /// [BOLT #2]: https://github.com/lightningnetwork/lightning-rfc/blob/master/02-peer-protocol.md
+       /// [BOLT #2]: https://github.com/lightning/bolts/blob/master/02-peer-protocol.md
        AwaitingRemoteRevokeToAnnounce(PendingHTLCStatus),
        /// Included in a received commitment_signed message (implying we've revoke_and_ack'd it).
        /// We have also included this HTLC in our latest commitment_signed and are now just waiting
@@ -710,6 +710,11 @@ pub(super) struct Channel<Signer: Sign> {
        // Our counterparty can offer us SCID aliases which they will map to this channel when routing
        // outbound payments. These can be used in invoice route hints to avoid explicitly revealing
        // the channel's funding UTXO.
+       //
+       // We also use this when sending our peer a channel_update that isn't to be broadcasted
+       // publicly - allowing them to re-use their map of SCID -> channel for channel_update ->
+       // associated channel mapping.
+       //
        // We only bother storing the most recent SCID alias at any time, though our counterparty has
        // to store all of them.
        latest_inbound_scid_alias: Option<u64>,
@@ -775,7 +780,7 @@ pub const MAX_CHAN_DUST_LIMIT_SATOSHIS: u64 = MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS
 /// In order to avoid having to concern ourselves with standardness during the closing process, we
 /// simply require our counterparty to use a dust limit which will leave any segwit output
 /// standard.
-/// See https://github.com/lightningnetwork/lightning-rfc/issues/905 for more details.
+/// See https://github.com/lightning/bolts/issues/905 for more details.
 pub const MIN_CHAN_DUST_LIMIT_SATOSHIS: u64 = 354;
 
 /// Used to return a simple Error back to ChannelManager. Will get converted to a
@@ -1307,7 +1312,7 @@ impl<Signer: Sign> Channel<Signer> {
                        counterparty_htlc_minimum_msat: msg.htlc_minimum_msat,
                        holder_htlc_minimum_msat: if config.own_channel_config.our_htlc_minimum_msat == 0 { 1 } else { config.own_channel_config.our_htlc_minimum_msat },
                        counterparty_max_accepted_htlcs: msg.max_accepted_htlcs,
-                       minimum_depth: Some(config.own_channel_config.minimum_depth),
+                       minimum_depth: Some(cmp::max(config.own_channel_config.minimum_depth, 1)),
 
                        counterparty_forwarding_info: None,
 
@@ -2013,12 +2018,6 @@ impl<Signer: Sign> Channel<Signer> {
                if msg.minimum_depth > peer_limits.max_minimum_depth {
                        return Err(ChannelError::Close(format!("We consider the minimum depth to be unreasonably large. Expected minimum: ({}). Actual: ({})", peer_limits.max_minimum_depth, msg.minimum_depth)));
                }
-               if msg.minimum_depth == 0 {
-                       // Note that if this changes we should update the serialization minimum version to
-                       // indicate to older clients that they don't understand some features of the current
-                       // channel.
-                       return Err(ChannelError::Close("Minimum confirmation depth must be at least 1".to_owned()));
-               }
 
                if let Some(ty) = &msg.channel_type {
                        if *ty != self.channel_type {
@@ -2055,7 +2054,12 @@ impl<Signer: Sign> Channel<Signer> {
                self.counterparty_selected_channel_reserve_satoshis = Some(msg.channel_reserve_satoshis);
                self.counterparty_htlc_minimum_msat = msg.htlc_minimum_msat;
                self.counterparty_max_accepted_htlcs = msg.max_accepted_htlcs;
-               self.minimum_depth = Some(msg.minimum_depth);
+
+               if peer_limits.trust_own_funding_0conf {
+                       self.minimum_depth = Some(msg.minimum_depth);
+               } else {
+                       self.minimum_depth = Some(cmp::max(1, msg.minimum_depth));
+               }
 
                let counterparty_pubkeys = ChannelPublicKeys {
                        funding_pubkey: msg.funding_pubkey,
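
The `trust_own_funding_0conf` limit checked above is a user-config knob; a rough sketch of opting in (the `UserConfig` field path here is an assumption based on this release's config layout and is not shown in this diff):

use lightning::util::config::UserConfig;

// Hypothetical opt-in: trust a counterparty's minimum_depth of 0 on channels we fund
// ourselves, matching the `peer_limits.trust_own_funding_0conf` branch above.
// The `peer_channel_config_limits` field name is an assumption.
fn zero_conf_friendly_config() -> UserConfig {
    let mut config = UserConfig::default();
    config.peer_channel_config_limits.trust_own_funding_0conf = true;
    config
}
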
@@ -2115,7 +2119,7 @@ impl<Signer: Sign> Channel<Signer> {
                &self.get_counterparty_pubkeys().funding_pubkey
        }
 
-       pub fn funding_created<L: Deref>(&mut self, msg: &msgs::FundingCreated, best_block: BestBlock, logger: &L) -> Result<(msgs::FundingSigned, ChannelMonitor<Signer>), ChannelError> where L::Target: Logger {
+       pub fn funding_created<L: Deref>(&mut self, msg: &msgs::FundingCreated, best_block: BestBlock, logger: &L) -> Result<(msgs::FundingSigned, ChannelMonitor<Signer>, Option<msgs::FundingLocked>), ChannelError> where L::Target: Logger {
                if self.is_outbound() {
                        return Err(ChannelError::Close("Received funding_created for an outbound channel?".to_owned()));
                }
@@ -2190,12 +2194,12 @@ impl<Signer: Sign> Channel<Signer> {
                Ok((msgs::FundingSigned {
                        channel_id: self.channel_id,
                        signature
-               }, channel_monitor))
+               }, channel_monitor, self.check_get_funding_locked(0)))
        }
 
        /// Handles a funding_signed message from the remote end.
        /// If this call is successful, broadcast the funding transaction (and not before!)
-       pub fn funding_signed<L: Deref>(&mut self, msg: &msgs::FundingSigned, best_block: BestBlock, logger: &L) -> Result<(ChannelMonitor<Signer>, Transaction), ChannelError> where L::Target: Logger {
+       pub fn funding_signed<L: Deref>(&mut self, msg: &msgs::FundingSigned, best_block: BestBlock, logger: &L) -> Result<(ChannelMonitor<Signer>, Transaction, Option<msgs::FundingLocked>), ChannelError> where L::Target: Logger {
                if !self.is_outbound() {
                        return Err(ChannelError::Close("Received funding_signed for an inbound channel?".to_owned()));
                }
@@ -2264,7 +2268,7 @@ impl<Signer: Sign> Channel<Signer> {
 
                log_info!(logger, "Received funding_signed from peer for channel {}", log_bytes!(self.channel_id()));
 
-               Ok((channel_monitor, self.funding_transaction.as_ref().cloned().unwrap()))
+               Ok((channel_monitor, self.funding_transaction.as_ref().cloned().unwrap(), self.check_get_funding_locked(0)))
        }
 
        /// Handles a funding_locked message from our peer. If we've already sent our funding_locked
@@ -3566,12 +3570,13 @@ impl<Signer: Sign> Channel<Signer> {
        /// monitor update failure must *not* have been sent to the remote end, and must instead
        /// have been dropped. They will be regenerated when monitor_updating_restored is called.
        pub fn monitor_update_failed(&mut self, resend_raa: bool, resend_commitment: bool,
-               mut pending_forwards: Vec<(PendingHTLCInfo, u64)>,
+               resend_funding_locked: bool, mut pending_forwards: Vec<(PendingHTLCInfo, u64)>,
                mut pending_fails: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
                mut pending_finalized_claimed_htlcs: Vec<HTLCSource>
        ) {
                self.monitor_pending_revoke_and_ack |= resend_raa;
                self.monitor_pending_commitment_signed |= resend_commitment;
+               self.monitor_pending_funding_locked |= resend_funding_locked;
                self.monitor_pending_forwards.append(&mut pending_forwards);
                self.monitor_pending_failures.append(&mut pending_fails);
                self.monitor_pending_finalized_fulfills.append(&mut pending_finalized_claimed_htlcs);
@@ -3585,17 +3590,28 @@ impl<Signer: Sign> Channel<Signer> {
                assert_eq!(self.channel_state & ChannelState::MonitorUpdateFailed as u32, ChannelState::MonitorUpdateFailed as u32);
                self.channel_state &= !(ChannelState::MonitorUpdateFailed as u32);
 
-               let funding_broadcastable = if self.channel_state & (ChannelState::FundingSent as u32) != 0 && self.is_outbound() {
-                       self.funding_transaction.take()
-               } else { None };
+               // If we're past (or at) the FundingSent stage on an outbound channel, try to
+               // (re-)broadcast the funding transaction as we may have declined to broadcast it when we
+               // first received the funding_signed.
+               let mut funding_broadcastable =
+                       if self.is_outbound() && self.channel_state & !MULTI_STATE_FLAGS >= ChannelState::FundingSent as u32 {
+                               self.funding_transaction.take()
+                       } else { None };
+               // That said, if the funding transaction is already confirmed (ie we're active with a
+               // minimum_depth over 0) don't bother re-broadcasting the confirmed funding tx.
+               if self.channel_state & !MULTI_STATE_FLAGS >= ChannelState::ChannelFunded as u32 && self.minimum_depth != Some(0) {
+                       funding_broadcastable = None;
+               }
 
                // We will never broadcast the funding transaction when we're in MonitorUpdateFailed (and
                // we assume the user never directly broadcasts the funding transaction and waits for us to
-               // do it). Thus, we can only ever hit monitor_pending_funding_locked when we're an inbound
-               // channel which failed to persist the monitor on funding_created, and we got the funding
-               // transaction confirmed before the monitor was persisted.
+               // do it). Thus, we can only ever hit monitor_pending_funding_locked when we're
+               // * an inbound channel that failed to persist the monitor on funding_created and we got
+               //   the funding transaction confirmed before the monitor was persisted, or
+               // * a 0-conf channel where we intended to send the funding_locked before any broadcast at all.
                let funding_locked = if self.monitor_pending_funding_locked {
-                       assert!(!self.is_outbound(), "Funding transaction broadcast by the local client before it should have - LDK didn't do it!");
+                       assert!(!self.is_outbound() || self.minimum_depth == Some(0),
+                               "Funding transaction broadcast by the local client before it should have - LDK didn't do it!");
                        self.monitor_pending_funding_locked = false;
                        let next_per_commitment_point = self.holder_signer.get_per_commitment_point(self.cur_holder_commitment_transaction_number, &self.secp_ctx);
                        Some(msgs::FundingLocked {
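The comments in this hunk describe two gates: an outbound channel at or past `FundingSent` may need its funding transaction (re-)broadcast, but not once a non-0-conf channel is already funded and therefore confirmed on chain. A minimal sketch of that decision over plain booleans, standing in for the real `ChannelState` bit-flag checks:

        // Hedged sketch of the funding_broadcastable decision in
        // monitor_updating_restored; all parameters are hypothetical stand-ins.
        fn should_hand_back_funding_tx(
            is_outbound: bool, at_least_funding_sent: bool, channel_funded: bool,
            minimum_depth: Option<u32>,
        ) -> bool {
            let mut broadcast = is_outbound && at_least_funding_sent;
            // A confirmed (non-0-conf) funding transaction never needs re-broadcast.
            if channel_funded && minimum_depth != Some(0) {
                broadcast = false;
            }
            broadcast
        }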
@@ -4577,6 +4593,11 @@ impl<Signer: Sign> Channel<Signer> {
                self.channel_state >= ChannelState::FundingSent as u32
        }
 
+       /// Returns true if our funding_locked has been sent
+       pub fn is_our_funding_locked(&self) -> bool {
+               (self.channel_state & ChannelState::OurFundingLocked as u32) != 0 || self.channel_state >= ChannelState::ChannelFunded as u32
+       }
+
        /// Returns true if our peer has either initiated or agreed to shut down the channel.
        pub fn received_shutdown(&self) -> bool {
                (self.channel_state & ChannelState::RemoteShutdownSent as u32) != 0
@@ -4607,7 +4628,7 @@ impl<Signer: Sign> Channel<Signer> {
        }
 
        fn check_get_funding_locked(&mut self, height: u32) -> Option<msgs::FundingLocked> {
-               if self.funding_tx_confirmation_height == 0 {
+               if self.funding_tx_confirmation_height == 0 && self.minimum_depth != Some(0) {
                        return None;
                }
 
@@ -4662,12 +4683,11 @@ impl<Signer: Sign> Channel<Signer> {
        pub fn transactions_confirmed<L: Deref>(&mut self, block_hash: &BlockHash, height: u32,
                txdata: &TransactionData, genesis_block_hash: BlockHash, node_pk: PublicKey, logger: &L)
        -> Result<(Option<msgs::FundingLocked>, Option<msgs::AnnouncementSignatures>), ClosureReason> where L::Target: Logger {
-               let non_shutdown_state = self.channel_state & (!MULTI_STATE_FLAGS);
                if let Some(funding_txo) = self.get_funding_txo() {
                        for &(index_in_block, tx) in txdata.iter() {
-                               // If we haven't yet sent a funding_locked, but are in FundingSent (ignoring
-                               // whether they've sent a funding_locked or not), check if we should send one.
-                               if non_shutdown_state & !(ChannelState::TheirFundingLocked as u32) == ChannelState::FundingSent as u32 {
+                               // Check if the transaction is the expected funding transaction, and if it is,
+                               // check that it pays the right amount to the right script.
+                               if self.funding_tx_confirmation_height == 0 {
                                        if tx.txid() == funding_txo.txid {
                                                let txo_idx = funding_txo.index as usize;
                                                if txo_idx >= tx.output.len() || tx.output[txo_idx].script_pubkey != self.get_funding_redeemscript().to_v0_p2wsh() ||
@@ -4784,9 +4804,9 @@ impl<Signer: Sign> Channel<Signer> {
                        // close the channel and hope we can get the latest state on chain (because presumably
                        // the funding transaction is at least still in the mempool of most nodes).
                        //
-                       // Note that ideally we wouldn't force-close if we see *any* reorg on a 1-conf channel,
-                       // but not doing so may lead to the `ChannelManager::short_to_id` map being
-                       // inconsistent, so we currently have to.
+                       // Note that ideally we wouldn't force-close if we see *any* reorg on a 1-conf or
+                       // 0-conf channel, but not doing so may lead to the `ChannelManager::short_to_id` map
+                       // being inconsistent, so we currently have to.
                        if funding_tx_confirmations == 0 && self.funding_tx_confirmed_in.is_some() {
                                let err_reason = format!("Funding transaction was un-confirmed. Locked at {} confs, now have {} confs.",
                                        self.minimum_depth.unwrap(), funding_tx_confirmations);
@@ -4883,6 +4903,12 @@ impl<Signer: Sign> Channel<Signer> {
                self.inbound_awaiting_accept
        }
 
+       /// Sets this channel to accept 0conf; must be done before `get_accept_channel`.
+       pub fn set_0conf(&mut self) {
+               assert!(self.inbound_awaiting_accept);
+               self.minimum_depth = Some(0);
+       }
+
        /// Marks an inbound channel as accepted and generates a [`msgs::AcceptChannel`] message which
        /// should be sent back to the counterparty node.
        ///
@@ -5426,7 +5452,7 @@ impl<Signer: Sign> Channel<Signer> {
                                commitment_txid: counterparty_commitment_txid,
                                htlc_outputs: htlcs.clone(),
                                commitment_number: self.cur_counterparty_commitment_transaction_number,
-                               their_revocation_point: self.counterparty_cur_commitment_point.unwrap()
+                               their_per_commitment_point: self.counterparty_cur_commitment_point.unwrap()
                        }]
                };
                self.channel_state |= ChannelState::AwaitingRemoteRevoke as u32;
@@ -5645,7 +5671,7 @@ impl<Signer: Sign> Channel<Signer> {
 }
 
 const SERIALIZATION_VERSION: u8 = 2;
-const MIN_SERIALIZATION_VERSION: u8 = 1;
+const MIN_SERIALIZATION_VERSION: u8 = 2;
 
 impl_writeable_tlv_based_enum!(InboundHTLCRemovalReason,;
        (0, FailRelay),
@@ -5710,12 +5736,10 @@ impl<Signer: Sign> Writeable for Channel<Signer> {
 
                self.user_id.write(writer)?;
 
-               // Write out the old serialization for the config object. This is read by version-1
-               // deserializers, but we will read the version in the TLV at the end instead.
-               self.config.forwarding_fee_proportional_millionths.write(writer)?;
-               self.config.cltv_expiry_delta.write(writer)?;
-               self.config.announced_channel.write(writer)?;
-               self.config.commit_upfront_shutdown_pubkey.write(writer)?;
+               // Version 1 deserializers expected to read parts of the config object here. Version 2
+               // deserializers (0.0.99) now read config through TLVs, and as we now require them for
+               // `minimum_depth` we simply write dummy values here.
+               writer.write_all(&[0; 8])?;
 
                self.channel_id.write(writer)?;
                (self.channel_state | ChannelState::PeerDisconnected as u32).write(writer)?;
@@ -6693,7 +6717,7 @@ mod tests {
                }]};
                let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
                let funding_created_msg = node_a_chan.get_outbound_funding_created(tx.clone(), funding_outpoint, &&logger).unwrap();
-               let (funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg, best_block, &&logger).unwrap();
+               let (funding_signed_msg, _, _) = node_b_chan.funding_created(&funding_created_msg, best_block, &&logger).unwrap();
 
                // Node B --> Node A: funding signed
                let _ = node_a_chan.funding_signed(&funding_signed_msg, best_block, &&logger);
index 40a92dffab34ff54f8954107f29640694e34ab9c..3de32529085e77c37f6030e8acd2a4b232c10795 100644 (file)
@@ -93,6 +93,8 @@ use util::crypto::sign;
 pub(super) enum PendingHTLCRouting {
        Forward {
                onion_packet: msgs::OnionPacket,
+               /// The SCID from the onion that we should forward to. This could be a "real" SCID, an
+               /// outbound SCID alias, or a phantom node SCID.
                short_channel_id: u64, // This should be NonZero<u64> eventually when we bump MSRV
        },
        Receive {
@@ -136,6 +138,8 @@ pub(super) enum HTLCForwardInfo {
                // `process_pending_htlc_forwards()` for constructing the
                // `HTLCSource::PreviousHopData` for failed and forwarded
                // HTLCs.
+               //
+               // Note that this may be an outbound SCID alias for the associated channel.
                prev_short_channel_id: u64,
                prev_htlc_id: u64,
                prev_funding_outpoint: OutPoint,
@@ -149,6 +153,7 @@ pub(super) enum HTLCForwardInfo {
 /// Tracks the inbound corresponding to an outbound HTLC
 #[derive(Clone, Hash, PartialEq, Eq)]
 pub(crate) struct HTLCPreviousHopData {
+       // Note that this may be an outbound SCID alias for the associated channel.
        short_channel_id: u64,
        htlc_id: u64,
        incoming_packet_shared_secret: [u8; 32],
@@ -965,9 +970,25 @@ pub struct ChannelDetails {
        /// Note that if [`inbound_scid_alias`] is set, it must be used for invoices and inbound
        /// payments instead of this. See [`get_inbound_payment_scid`].
        ///
+       /// For channels with [`confirmations_required`] set to `Some(0)`, [`outbound_scid_alias`] may
+       /// be used in place of this in outbound routes. See [`get_outbound_payment_scid`].
+       ///
        /// [`inbound_scid_alias`]: Self::inbound_scid_alias
+       /// [`outbound_scid_alias`]: Self::outbound_scid_alias
        /// [`get_inbound_payment_scid`]: Self::get_inbound_payment_scid
+       /// [`get_outbound_payment_scid`]: Self::get_outbound_payment_scid
+       /// [`confirmations_required`]: Self::confirmations_required
        pub short_channel_id: Option<u64>,
+       /// An optional [`short_channel_id`] alias for this channel, randomly generated by us and
+       /// usable in place of [`short_channel_id`] to reference the channel in outbound routes when
+       /// the channel has not yet been confirmed (as long as [`confirmations_required`] is
+       /// `Some(0)`).
+       ///
+       /// This will be `None` as long as the channel is not available for routing outbound payments.
+       ///
+       /// [`short_channel_id`]: Self::short_channel_id
+       /// [`confirmations_required`]: Self::confirmations_required
+       pub outbound_scid_alias: Option<u64>,
        /// An optional [`short_channel_id`] alias for this channel, randomly generated by our
        /// counterparty and usable in place of [`short_channel_id`] in invoice route hints. Our
        /// counterparty will recognize the alias provided here in place of the [`short_channel_id`]
@@ -1085,6 +1106,16 @@ impl ChannelDetails {
        pub fn get_inbound_payment_scid(&self) -> Option<u64> {
                self.inbound_scid_alias.or(self.short_channel_id)
        }
+
+       /// Gets the current SCID which should be used to identify this channel for outbound payments.
+       /// This should be used in [`Route`]s to describe the first hop or in other contexts where
+       /// we're sending or forwarding a payment outbound over this channel.
+       ///
+       /// This is either the [`ChannelDetails::short_channel_id`], if set, or the
+       /// [`ChannelDetails::outbound_scid_alias`]. See those for more information.
+       pub fn get_outbound_payment_scid(&self) -> Option<u64> {
+               self.short_channel_id.or(self.outbound_scid_alias)
+       }
 }
 
 /// If a payment fails to send, it can be in one of several states. This enum is returned as the
@@ -1283,7 +1314,7 @@ macro_rules! remove_channel {
 }
 
 macro_rules! handle_monitor_err {
-       ($self: ident, $err: expr, $short_to_id: expr, $chan: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $failed_forwards: expr, $failed_fails: expr, $failed_finalized_fulfills: expr, $chan_id: expr) => {
+       ($self: ident, $err: expr, $short_to_id: expr, $chan: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $resend_funding_locked: expr, $failed_forwards: expr, $failed_fails: expr, $failed_finalized_fulfills: expr, $chan_id: expr) => {
                match $err {
                        ChannelMonitorUpdateErr::PermanentFailure => {
                                log_error!($self.logger, "Closing channel {} due to monitor update ChannelMonitorUpdateErr::PermanentFailure", log_bytes!($chan_id[..]));
@@ -1321,13 +1352,13 @@ macro_rules! handle_monitor_err {
                                if !$resend_raa {
                                        debug_assert!($action_type == RAACommitmentOrder::CommitmentFirst || !$resend_commitment);
                                }
-                               $chan.monitor_update_failed($resend_raa, $resend_commitment, $failed_forwards, $failed_fails, $failed_finalized_fulfills);
+                               $chan.monitor_update_failed($resend_raa, $resend_commitment, $resend_funding_locked, $failed_forwards, $failed_fails, $failed_finalized_fulfills);
                                (Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore("Failed to update ChannelMonitor".to_owned()), *$chan_id)), false)
                        },
                }
        };
-       ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $failed_forwards: expr, $failed_fails: expr, $failed_finalized_fulfills: expr) => { {
-               let (res, drop) = handle_monitor_err!($self, $err, $channel_state.short_to_id, $entry.get_mut(), $action_type, $resend_raa, $resend_commitment, $failed_forwards, $failed_fails, $failed_finalized_fulfills, $entry.key());
+       ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $resend_funding_locked: expr, $failed_forwards: expr, $failed_fails: expr, $failed_finalized_fulfills: expr) => { {
+               let (res, drop) = handle_monitor_err!($self, $err, $channel_state.short_to_id, $entry.get_mut(), $action_type, $resend_raa, $resend_commitment, $resend_funding_locked, $failed_forwards, $failed_fails, $failed_finalized_fulfills, $entry.key());
                if drop {
                        $entry.remove_entry();
                }
@@ -1335,16 +1366,19 @@ macro_rules! handle_monitor_err {
        } };
        ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $chan_id: expr, COMMITMENT_UPDATE_ONLY) => { {
                debug_assert!($action_type == RAACommitmentOrder::CommitmentFirst);
-               handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, false, true, Vec::new(), Vec::new(), Vec::new(), $chan_id)
+               handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, false, true, false, Vec::new(), Vec::new(), Vec::new(), $chan_id)
        } };
        ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $chan_id: expr, NO_UPDATE) => {
-               handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, false, false, Vec::new(), Vec::new(), Vec::new(), $chan_id)
+               handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, false, false, false, Vec::new(), Vec::new(), Vec::new(), $chan_id)
+       };
+       ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_funding_locked: expr, OPTIONALLY_RESEND_FUNDING_LOCKED) => {
+               handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, false, false, $resend_funding_locked, Vec::new(), Vec::new(), Vec::new())
        };
        ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr) => {
-               handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment, Vec::new(), Vec::new(), Vec::new())
+               handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment, false, Vec::new(), Vec::new(), Vec::new())
        };
        ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $failed_forwards: expr, $failed_fails: expr) => {
-               handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment, $failed_forwards, $failed_fails, Vec::new())
+               handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment, false, $failed_forwards, $failed_fails, Vec::new())
        };
 }
 
@@ -1400,7 +1434,7 @@ macro_rules! handle_chan_restoration_locked {
                let res = loop {
                        let forwards: Vec<(PendingHTLCInfo, u64)> = $pending_forwards; // Force type-checking to resolve
                        if !forwards.is_empty() {
-                               htlc_forwards = Some(($channel_entry.get().get_short_channel_id().expect("We can't have pending forwards before funding confirmation"),
+                               htlc_forwards = Some(($channel_entry.get().get_short_channel_id().unwrap_or($channel_entry.get().outbound_scid_alias()),
                                        $channel_entry.get().get_funding_txo().unwrap(), forwards));
                        }
 
@@ -1708,6 +1742,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                        // `have_received_message` indicates that type negotiation has completed.
                                        channel_type: if channel.have_received_message() { Some(channel.get_channel_type().clone()) } else { None },
                                        short_channel_id: channel.get_short_channel_id(),
+                                       outbound_scid_alias: if channel.is_usable() { Some(channel.outbound_scid_alias()) } else { None },
                                        inbound_scid_alias: channel.latest_inbound_scid_alias(),
                                        channel_value_satoshis: channel.get_value_satoshis(),
                                        unspendable_punishment_reserve: to_self_reserve_satoshis,
@@ -2260,7 +2295,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                        msg.cltv_expiry.write(&mut res).expect("Writes cannot fail");
                                                }
                                                else if code == 0x1000 | 20 {
-                                                       // TODO: underspecified, follow https://github.com/lightningnetwork/lightning-rfc/issues/791
+                                                       // TODO: underspecified, follow https://github.com/lightning/bolts/issues/791
                                                        0u16.write(&mut res).expect("Writes cannot fail");
                                                }
                                                (chan_update.serialized_length() as u16 + 2).write(&mut res).expect("Writes cannot fail");
@@ -2287,6 +2322,9 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                action: msgs::ErrorAction::IgnoreError
                        });
                }
+               if chan.get_short_channel_id().is_none() {
+                       return Err(LightningError{err: "Channel not yet established".to_owned(), action: msgs::ErrorAction::IgnoreError});
+               }
                log_trace!(self.logger, "Attempting to generate broadcast channel update for channel {}", log_bytes!(chan.channel_id()));
                self.get_channel_update_for_unicast(chan)
        }
@@ -2298,7 +2336,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
        /// May be called with channel_state already locked!
        fn get_channel_update_for_unicast(&self, chan: &Channel<Signer>) -> Result<msgs::ChannelUpdate, LightningError> {
                log_trace!(self.logger, "Attempting to generate channel update for channel {}", log_bytes!(chan.channel_id()));
-               let short_channel_id = match chan.get_short_channel_id() {
+               let short_channel_id = match chan.get_short_channel_id().or(chan.latest_inbound_scid_alias()) {
                        None => return Err(LightningError{err: "Channel not yet established".to_owned(), action: msgs::ErrorAction::IgnoreError}),
                        Some(id) => id,
                };
@@ -4163,20 +4201,45 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                }
        }
 
-       /// Called to accept a request to open a channel after [`Event::OpenChannelRequest`] has been
-       /// triggered.
+       /// Accepts a request to open a channel after an [`Event::OpenChannelRequest`].
        ///
        /// The `temporary_channel_id` parameter indicates which inbound channel should be accepted,
        /// and the `counterparty_node_id` parameter is the id of the peer which has requested to open
        /// the channel.
        ///
-       /// For inbound channels, the `user_channel_id` parameter will be provided back in
+       /// The `user_channel_id` parameter will be provided back in
        /// [`Event::ChannelClosed::user_channel_id`] to allow tracking of which events correspond
-       /// with which `accept_inbound_channel` call.
+       /// with which `accept_inbound_channel`/`accept_inbound_channel_from_trusted_peer_0conf` call.
        ///
        /// [`Event::OpenChannelRequest`]: events::Event::OpenChannelRequest
        /// [`Event::ChannelClosed::user_channel_id`]: events::Event::ChannelClosed::user_channel_id
        pub fn accept_inbound_channel(&self, temporary_channel_id: &[u8; 32], counterparty_node_id: &PublicKey, user_channel_id: u64) -> Result<(), APIError> {
+               self.do_accept_inbound_channel(temporary_channel_id, counterparty_node_id, false, user_channel_id)
+       }
+
+       /// Accepts a request to open a channel after an [`events::Event::OpenChannelRequest`], treating
+       /// it as confirmed immediately.
+       ///
+       /// The `user_channel_id` parameter will be provided back in
+       /// [`Event::ChannelClosed::user_channel_id`] to allow tracking of which events correspond
+       /// with which `accept_inbound_channel`/`accept_inbound_channel_from_trusted_peer_0conf` call.
+       ///
+       /// Unlike [`ChannelManager::accept_inbound_channel`], this method accepts the incoming channel
+       /// and, if the counterparty agrees, enables forwarding of payments immediately.
+       ///
+       /// This fully trusts that the counterparty has honestly and correctly constructed the funding
+       /// transaction and blindly assumes that it will eventually confirm.
+       ///
+       /// If it does not confirm before we decide to close the channel, or if the funding transaction
+       /// does not pay the correct amount to the correct script, *you will lose funds*.
+       ///
+       /// [`Event::OpenChannelRequest`]: events::Event::OpenChannelRequest
+       /// [`Event::ChannelClosed::user_channel_id`]: events::Event::ChannelClosed::user_channel_id
+       pub fn accept_inbound_channel_from_trusted_peer_0conf(&self, temporary_channel_id: &[u8; 32], counterparty_node_id: &PublicKey, user_channel_id: u64) -> Result<(), APIError> {
+               self.do_accept_inbound_channel(temporary_channel_id, counterparty_node_id, true, user_channel_id)
+       }
+
+       fn do_accept_inbound_channel(&self, temporary_channel_id: &[u8; 32], counterparty_node_id: &PublicKey, accept_0conf: bool, user_channel_id: u64) -> Result<(), APIError> {
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
 
                let mut channel_state_lock = self.channel_state.lock().unwrap();
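A hedged usage fragment for the new accept path, assumed to live inside an application's event handler; the `Event::OpenChannelRequest` field names used here and the `is_trusted_peer` helper are assumptions for illustration, and `42` is an arbitrary `user_channel_id`:

        // Hedged sketch: only take the 0-conf path for peers the application trusts.
        match event {
            Event::OpenChannelRequest { temporary_channel_id, counterparty_node_id, .. } => {
                if is_trusted_peer(&counterparty_node_id) {
                    channel_manager.accept_inbound_channel_from_trusted_peer_0conf(
                        &temporary_channel_id, &counterparty_node_id, 42).unwrap();
                } else {
                    channel_manager.accept_inbound_channel(
                        &temporary_channel_id, &counterparty_node_id, 42).unwrap();
                }
            },
            _ => {},
        }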
@@ -4189,6 +4252,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                if *counterparty_node_id != channel.get().get_counterparty_node_id() {
                                        return Err(APIError::APIMisuseError { err: "The passed counterparty_node_id doesn't match the channel's counterparty node_id".to_owned() });
                                }
+                               if accept_0conf { channel.get_mut().set_0conf(); }
                                channel_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel {
                                        node_id: channel.get().get_counterparty_node_id(),
                                        msg: channel.get_mut().accept_inbound_channel(user_channel_id),
@@ -4280,7 +4344,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
        }
 
        fn internal_funding_created(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingCreated) -> Result<(), MsgHandleErrInternal> {
-               let ((funding_msg, monitor), mut chan) = {
+               let ((funding_msg, monitor, mut funding_locked), mut chan) = {
                        let best_block = *self.best_block.read().unwrap();
                        let mut channel_lock = self.channel_state.lock().unwrap();
                        let channel_state = &mut *channel_lock;
@@ -4315,7 +4379,8 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                        // hasn't persisted to disk yet - we can't lose money on a transaction that we haven't
                                        // accepted payment from yet. We do, however, need to wait to send our funding_locked
                                        // until we have persisted our monitor.
-                                       chan.monitor_update_failed(false, false, Vec::new(), Vec::new(), Vec::new());
+                                       chan.monitor_update_failed(false, false, funding_locked.is_some(), Vec::new(), Vec::new(), Vec::new());
+                                       funding_locked = None; // Don't send the funding_locked now
                                },
                        }
                }
@@ -4330,6 +4395,9 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                        node_id: counterparty_node_id.clone(),
                                        msg: funding_msg,
                                });
+                               if let Some(msg) = funding_locked {
+                                       send_funding_locked!(channel_state.short_to_id, channel_state.pending_msg_events, chan, msg);
+                               }
                                e.insert(chan);
                        }
                }
@@ -4346,12 +4414,12 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                        if chan.get().get_counterparty_node_id() != *counterparty_node_id {
                                                return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
                                        }
-                                       let (monitor, funding_tx) = match chan.get_mut().funding_signed(&msg, best_block, &self.logger) {
+                                       let (monitor, funding_tx, funding_locked) = match chan.get_mut().funding_signed(&msg, best_block, &self.logger) {
                                                Ok(update) => update,
                                                Err(e) => try_chan_entry!(self, Err(e), channel_state, chan),
                                        };
                                        if let Err(e) = self.chain_monitor.watch_channel(chan.get().get_funding_txo().unwrap(), monitor) {
-                                               let mut res = handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::RevokeAndACKFirst, false, false);
+                                               let mut res = handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::RevokeAndACKFirst, funding_locked.is_some(), OPTIONALLY_RESEND_FUNDING_LOCKED);
                                                if let Err(MsgHandleErrInternal { ref mut shutdown_finish, .. }) = res {
                                                        // We weren't able to watch the channel to begin with, so no updates should be made on
                                                        // it. Previously, full_stack_target found an (unreachable) panic when the
@@ -4362,6 +4430,9 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                }
                                                return res
                                        }
+                                       if let Some(msg) = funding_locked {
+                                               send_funding_locked!(channel_state.short_to_id, channel_state.pending_msg_events, chan.get(), msg);
+                                       }
                                        funding_tx
                                },
                                hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
@@ -4712,7 +4783,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                } else {
                                                        if let Err(e) = handle_monitor_err!(self, e, channel_state, chan,
                                                                        RAACommitmentOrder::CommitmentFirst, false,
-                                                                       raa_updates.commitment_update.is_some(),
+                                                                       raa_updates.commitment_update.is_some(), false,
                                                                        raa_updates.accepted_htlcs, raa_updates.failed_htlcs,
                                                                        raa_updates.finalized_claimed_htlcs) {
                                                                break Err(e);
@@ -4728,7 +4799,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                        break Ok((raa_updates.accepted_htlcs, raa_updates.failed_htlcs,
                                                        raa_updates.finalized_claimed_htlcs,
                                                        chan.get().get_short_channel_id()
-                                                               .expect("RAA should only work on a short-id-available channel"),
+                                                               .unwrap_or(chan.get().outbound_scid_alias()),
                                                        chan.get().get_funding_txo().unwrap()))
                                },
                                hash_map::Entry::Vacant(_) => break Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
@@ -4893,7 +4964,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                        MonitorEvent::HTLCEvent(htlc_update) => {
                                                if let Some(preimage) = htlc_update.payment_preimage {
                                                        log_trace!(self.logger, "Claiming HTLC with preimage {} from our monitor", log_bytes!(preimage.0));
-                                                       self.claim_funds_internal(self.channel_state.lock().unwrap(), htlc_update.source, preimage, htlc_update.onchain_value_satoshis.map(|v| v * 1000), true, funding_outpoint.to_channel_id());
+                                                       self.claim_funds_internal(self.channel_state.lock().unwrap(), htlc_update.source, preimage, htlc_update.htlc_value_satoshis.map(|v| v * 1000), true, funding_outpoint.to_channel_id());
                                                } else {
                                                        log_trace!(self.logger, "Failing HTLC with hash {} from our monitor", log_bytes!(htlc_update.payment_hash.0));
                                                        self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_update.source, &htlc_update.payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
@@ -5567,6 +5638,19 @@ where
                                                        }
                                                }
                                        }
+                                       if channel.is_our_funding_locked() {
+                                               if let Some(real_scid) = channel.get_short_channel_id() {
+                                                       // If we sent a 0conf funding_locked, and now have an SCID, we add it
+                                                       // to the short_to_id map here. Note that we check whether we can relay
+                                                       // using the real SCID at relay-time (i.e. enforce option_scid_alias
+                                                       // then), and if the funding tx is ever un-confirmed we force-close the
+                                                       // channel, ensuring short_to_id is always consistent.
+                                                       let scid_insert = short_to_id.insert(real_scid, channel.channel_id());
+                                                       assert!(scid_insert.is_none() || scid_insert.unwrap() == channel.channel_id(),
+                                                               "SCIDs should never collide - ensure you weren't behind by a full {} blocks when creating channels",
+                                                               fake_scid::MAX_SCID_BLOCKS_FROM_NOW);
+                                               }
+                                       }
                                } else if let Err(reason) = res {
                                        update_maps_on_chan_removal!(self, short_to_id, channel);
                                        // It looks like our counterparty went on-chain or funding transaction was
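The block above back-fills `short_to_id` once a 0-conf channel's funding transaction confirms and a real SCID exists, treating any collision other than a re-insert of the same channel as a bug. A minimal sketch of that map maintenance with a plain `HashMap`:

        use std::collections::HashMap;

        // Hedged sketch: record the now-known real SCID for a channel, allowing a
        // harmless re-insert of the same channel but asserting on genuine collisions.
        fn record_real_scid(short_to_id: &mut HashMap<u64, [u8; 32]>, real_scid: u64, channel_id: [u8; 32]) {
            let previous = short_to_id.insert(real_scid, channel_id);
            assert!(previous.is_none() || previous == Some(channel_id), "SCIDs should never collide");
        }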
@@ -5979,6 +6063,7 @@ impl_writeable_tlv_based!(ChannelDetails, {
        (2, channel_id, required),
        (3, channel_type, option),
        (4, counterparty, required),
+       (5, outbound_scid_alias, option),
        (6, funding_txo, option),
        (8, short_channel_id, option),
        (10, channel_value_satoshis, required),
index 2580874640e438e611bea9b88ddda0384dc9db5d..5c4a94dabfd59c367a91012719d5b5c82de609f8 100644 (file)
@@ -19,7 +19,7 @@
 //! supports a feature if it advertises the feature (as either required or optional) to its peers.
 //! And the implementation can interpret a feature if the feature is known to it.
 //!
-//! [BOLT #9]: https://github.com/lightningnetwork/lightning-rfc/blob/master/09-features.md
+//! [BOLT #9]: https://github.com/lightning/bolts/blob/master/09-features.md
 //! [messages]: crate::ln::msgs
 
 use {io, io_extras};
@@ -244,7 +244,7 @@ mod sealed {
                        ///
                        /// See [BOLT #9] for details.
                        ///
-                       /// [BOLT #9]: https://github.com/lightningnetwork/lightning-rfc/blob/master/09-features.md
+                       /// [BOLT #9]: https://github.com/lightning/bolts/blob/master/09-features.md
                        pub trait $feature: Context {
                                /// The bit used to signify that the feature is required.
                                const EVEN_BIT: usize = $odd_bit - 1;
index e176782b658d69676b2e7242e8feac48a4304fee..31fdc6ffae4514411b33003574c14c362df69025 100644 (file)
@@ -21,6 +21,7 @@ use ln::features::{InitFeatures, InvoiceFeatures};
 use ln::msgs;
 use ln::msgs::{ChannelMessageHandler,RoutingMessageHandler};
 use util::enforcing_trait_impls::EnforcingSigner;
+use util::scid_utils;
 use util::test_utils;
 use util::test_utils::{panicking, TestChainMonitor};
 use util::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose};
@@ -50,9 +51,13 @@ pub const CHAN_CONFIRM_DEPTH: u32 = 10;
 
 /// Mine the given transaction in the next block and then mine CHAN_CONFIRM_DEPTH - 1 blocks on
 /// top, giving the given transaction CHAN_CONFIRM_DEPTH confirmations.
-pub fn confirm_transaction<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, tx: &Transaction) {
-       confirm_transaction_at(node, tx, node.best_block_info().1 + 1);
+///
+/// Returns the SCID a channel confirmed in the given transaction will have, assuming the funding
+/// output is the 1st output in the transaction.
+pub fn confirm_transaction<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, tx: &Transaction) -> u64 {
+       let scid = confirm_transaction_at(node, tx, node.best_block_info().1 + 1);
        connect_blocks(node, CHAN_CONFIRM_DEPTH - 1);
+       scid
 }
 /// Mine a single block containing the given transaction
 pub fn mine_transaction<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, tx: &Transaction) {
@@ -61,7 +66,10 @@ pub fn mine_transaction<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, tx: &Transac
 }
 /// Mine the given transaction at the given height, mining blocks as required to build to that
 /// height
-pub fn confirm_transaction_at<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, tx: &Transaction, conf_height: u32) {
+///
+/// Returns the SCID a channel confirmed in the given transaction will have, assuming the funding
+/// output is the 1st output in the transaction.
+pub fn confirm_transaction_at<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, tx: &Transaction, conf_height: u32) -> u64 {
        let first_connect_height = node.best_block_info().1 + 1;
        assert!(first_connect_height <= conf_height);
        if conf_height > first_connect_height {
@@ -76,31 +84,64 @@ pub fn confirm_transaction_at<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, tx: &T
        }
        block.txdata.push(tx.clone());
        connect_block(node, &block);
+       scid_utils::scid_from_parts(conf_height as u64, block.txdata.len() as u64 - 1, 0).unwrap()
 }
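`confirm_transaction_at` now derives the SCID the confirmed channel will use from the confirmation height, the transaction's index in the block, and output index 0. A hedged sketch of the BOLT 7 short_channel_id packing that `scid_utils::scid_from_parts` is assumed to perform (the real helper returns a `Result` and validates its inputs):

        // Hedged sketch: block height in the top 3 bytes, transaction index in the
        // middle 3, output index in the low 2, per BOLT 7.
        fn scid_from_parts(block: u64, tx_index: u64, vout_index: u64) -> u64 {
            assert!(block < (1 << 24) && tx_index < (1 << 24) && vout_index < (1 << 16));
            (block << 40) | (tx_index << 16) | vout_index
        }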
 
 /// The possible ways we may notify a ChannelManager of a new block
-#[derive(Clone, Copy, PartialEq)]
+#[derive(Clone, Copy, Debug, PartialEq)]
 pub enum ConnectStyle {
-       /// Calls best_block_updated first, detecting transactions in the block only after receiving the
-       /// header and height information.
+       /// Calls `best_block_updated` first, detecting transactions in the block only after receiving
+       /// the header and height information.
        BestBlockFirst,
-       /// The same as BestBlockFirst, however when we have multiple blocks to connect, we only
-       /// make a single best_block_updated call.
+       /// The same as `BestBlockFirst`, however when we have multiple blocks to connect, we only
+       /// make a single `best_block_updated` call.
        BestBlockFirstSkippingBlocks,
-       /// Calls transactions_confirmed first, detecting transactions in the block before updating the
-       /// header and height information.
+       /// The same as `BestBlockFirst` when connecting blocks. During disconnection only
+       /// `transaction_unconfirmed` is called.
+       BestBlockFirstReorgsOnlyTip,
+       /// Calls `transactions_confirmed` first, detecting transactions in the block before updating
+       /// the header and height information.
        TransactionsFirst,
-       /// The same as TransactionsFirst, however when we have multiple blocks to connect, we only
-       /// make a single best_block_updated call.
+       /// The same as `TransactionsFirst`, however when we have multiple blocks to connect, we only
+       /// make a single `best_block_updated` call.
        TransactionsFirstSkippingBlocks,
-       /// Provides the full block via the chain::Listen interface. In the current code this is
-       /// equivalent to TransactionsFirst with some additional assertions.
+       /// The same as `TransactionsFirst` when connecting blocks. During disconnection only
+       /// `transaction_unconfirmed` is called.
+       TransactionsFirstReorgsOnlyTip,
+       /// Provides the full block via the `chain::Listen` interface. In the current code this is
+       /// equivalent to `TransactionsFirst` with some additional assertions.
        FullBlockViaListen,
 }
 
+impl ConnectStyle {
+       fn random_style() -> ConnectStyle {
+               #[cfg(feature = "std")] {
+                       use core::hash::{BuildHasher, Hasher};
+                       // Get a random value using the only std API to do so - the DefaultHasher
+                       let rand_val = std::collections::hash_map::RandomState::new().build_hasher().finish();
+                       let res = match rand_val % 7 {
+                               0 => ConnectStyle::BestBlockFirst,
+                               1 => ConnectStyle::BestBlockFirstSkippingBlocks,
+                               2 => ConnectStyle::BestBlockFirstReorgsOnlyTip,
+                               3 => ConnectStyle::TransactionsFirst,
+                               4 => ConnectStyle::TransactionsFirstSkippingBlocks,
+                               5 => ConnectStyle::TransactionsFirstReorgsOnlyTip,
+                               6 => ConnectStyle::FullBlockViaListen,
+                               _ => unreachable!(),
+                       };
+                       eprintln!("Using Block Connection Style: {:?}", res);
+                       res
+               }
+               #[cfg(not(feature = "std"))] {
+                       ConnectStyle::FullBlockViaListen
+               }
+       }
+}
+
 pub fn connect_blocks<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, depth: u32) -> BlockHash {
        let skip_intermediaries = match *node.connect_style.borrow() {
-               ConnectStyle::BestBlockFirstSkippingBlocks|ConnectStyle::TransactionsFirstSkippingBlocks => true,
+               ConnectStyle::BestBlockFirstSkippingBlocks|ConnectStyle::TransactionsFirstSkippingBlocks|
+                       ConnectStyle::BestBlockFirstReorgsOnlyTip|ConnectStyle::TransactionsFirstReorgsOnlyTip => true,
                _ => false,
        };
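`random_style` above picks a block-connection style per test run without pulling in a `rand` dependency: `std`'s `RandomState` is randomly seeded, so finishing an empty `DefaultHasher` yields a usable pseudo-random `u64`. A minimal sketch of that trick (std-only; the real helper falls back to `FullBlockViaListen` under `no_std`):

        use std::collections::hash_map::RandomState;
        use std::hash::{BuildHasher, Hasher};

        // Hedged sketch: derive a pseudo-random u64 from std's randomly seeded
        // hasher state, as ConnectStyle::random_style does above.
        fn random_u64() -> u64 {
            RandomState::new().build_hasher().finish()
        }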
 
@@ -111,18 +152,20 @@ pub fn connect_blocks<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, depth: u32) ->
        };
        assert!(depth >= 1);
        for i in 1..depth {
-               do_connect_block(node, &block, skip_intermediaries);
+               let prev_blockhash = block.header.block_hash();
+               do_connect_block(node, block, skip_intermediaries);
                block = Block {
-                       header: BlockHeader { version: 0x20000000, prev_blockhash: block.header.block_hash(), merkle_root: Default::default(), time: height + i, bits: 42, nonce: 42 },
+                       header: BlockHeader { version: 0x20000000, prev_blockhash, merkle_root: Default::default(), time: height + i, bits: 42, nonce: 42 },
                        txdata: vec![],
                };
        }
-       connect_block(node, &block);
-       block.header.block_hash()
+       let hash = block.header.block_hash();
+       do_connect_block(node, block, false);
+       hash
 }
 
 pub fn connect_block<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, block: &Block) {
-       do_connect_block(node, block, false);
+       do_connect_block(node, block.clone(), false);
 }
 
 fn call_claimable_balances<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>) {
@@ -132,20 +175,23 @@ fn call_claimable_balances<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>) {
        }
 }
 
-fn do_connect_block<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, block: &Block, skip_intermediaries: bool) {
+fn do_connect_block<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, block: Block, skip_intermediaries: bool) {
        call_claimable_balances(node);
        let height = node.best_block_info().1 + 1;
+       #[cfg(feature = "std")] {
+               eprintln!("Connecting block using Block Connection Style: {:?}", *node.connect_style.borrow());
+       }
        if !skip_intermediaries {
                let txdata: Vec<_> = block.txdata.iter().enumerate().collect();
                match *node.connect_style.borrow() {
-                       ConnectStyle::BestBlockFirst|ConnectStyle::BestBlockFirstSkippingBlocks => {
+                       ConnectStyle::BestBlockFirst|ConnectStyle::BestBlockFirstSkippingBlocks|ConnectStyle::BestBlockFirstReorgsOnlyTip => {
                                node.chain_monitor.chain_monitor.best_block_updated(&block.header, height);
                                call_claimable_balances(node);
                                node.chain_monitor.chain_monitor.transactions_confirmed(&block.header, &txdata, height);
                                node.node.best_block_updated(&block.header, height);
                                node.node.transactions_confirmed(&block.header, &txdata, height);
                        },
-                       ConnectStyle::TransactionsFirst|ConnectStyle::TransactionsFirstSkippingBlocks => {
+                       ConnectStyle::TransactionsFirst|ConnectStyle::TransactionsFirstSkippingBlocks|ConnectStyle::TransactionsFirstReorgsOnlyTip => {
                                node.chain_monitor.chain_monitor.transactions_confirmed(&block.header, &txdata, height);
                                call_claimable_balances(node);
                                node.chain_monitor.chain_monitor.best_block_updated(&block.header, height);
@@ -160,30 +206,39 @@ fn do_connect_block<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, block: &Block, s
        }
        call_claimable_balances(node);
        node.node.test_process_background_events();
-       node.blocks.lock().unwrap().push((block.header, height));
+       node.blocks.lock().unwrap().push((block, height));
 }
 
 pub fn disconnect_blocks<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, count: u32) {
        call_claimable_balances(node);
+       #[cfg(feature = "std")] {
+               eprintln!("Disconnecting {} blocks using Block Connection Style: {:?}", count, *node.connect_style.borrow());
+       }
        for i in 0..count {
-               let orig_header = node.blocks.lock().unwrap().pop().unwrap();
-               assert!(orig_header.1 > 0); // Cannot disconnect genesis
-               let prev_header = node.blocks.lock().unwrap().last().unwrap().clone();
+               let orig = node.blocks.lock().unwrap().pop().unwrap();
+               assert!(orig.1 > 0); // Cannot disconnect genesis
+               let prev = node.blocks.lock().unwrap().last().unwrap().clone();
 
                match *node.connect_style.borrow() {
                        ConnectStyle::FullBlockViaListen => {
-                               node.chain_monitor.chain_monitor.block_disconnected(&orig_header.0, orig_header.1);
-                               Listen::block_disconnected(node.node, &orig_header.0, orig_header.1);
+                               node.chain_monitor.chain_monitor.block_disconnected(&orig.0.header, orig.1);
+                               Listen::block_disconnected(node.node, &orig.0.header, orig.1);
                        },
                        ConnectStyle::BestBlockFirstSkippingBlocks|ConnectStyle::TransactionsFirstSkippingBlocks => {
                                if i == count - 1 {
-                                       node.chain_monitor.chain_monitor.best_block_updated(&prev_header.0, prev_header.1);
-                                       node.node.best_block_updated(&prev_header.0, prev_header.1);
+                                       node.chain_monitor.chain_monitor.best_block_updated(&prev.0.header, prev.1);
+                                       node.node.best_block_updated(&prev.0.header, prev.1);
+                               }
+                       },
+                       ConnectStyle::BestBlockFirstReorgsOnlyTip|ConnectStyle::TransactionsFirstReorgsOnlyTip => {
+                               for tx in orig.0.txdata {
+                                       node.chain_monitor.chain_monitor.transaction_unconfirmed(&tx.txid());
+                                       node.node.transaction_unconfirmed(&tx.txid());
                                }
                        },
                        _ => {
-                               node.chain_monitor.chain_monitor.best_block_updated(&prev_header.0, prev_header.1);
-                               node.node.best_block_updated(&prev_header.0, prev_header.1);
+                               node.chain_monitor.chain_monitor.best_block_updated(&prev.0.header, prev.1);
+                               node.node.best_block_updated(&prev.0.header, prev.1);
                        },
                }
                call_claimable_balances(node);
@@ -229,7 +284,7 @@ pub struct Node<'a, 'b: 'a, 'c: 'b> {
        pub network_payment_count: Rc<RefCell<u8>>,
        pub network_chan_count: Rc<RefCell<u32>>,
        pub logger: &'c test_utils::TestLogger,
-       pub blocks: Arc<Mutex<Vec<(BlockHeader, u32)>>>,
+       pub blocks: Arc<Mutex<Vec<(Block, u32)>>>,
        pub connect_style: Rc<RefCell<ConnectStyle>>,
 }
 impl<'a, 'b, 'c> Node<'a, 'b, 'c> {
@@ -240,7 +295,7 @@ impl<'a, 'b, 'c> Node<'a, 'b, 'c> {
                self.blocks.lock().unwrap().last().map(|(a, b)| (a.block_hash(), *b)).unwrap()
        }
        pub fn get_block_header(&self, height: u32) -> BlockHeader {
-               self.blocks.lock().unwrap()[height as usize].0
+               self.blocks.lock().unwrap()[height as usize].0.header
        }
 }
 
@@ -1860,7 +1915,7 @@ pub fn create_chanmon_cfgs(node_count: usize) -> Vec<TestChanMonCfg> {
        for i in 0..node_count {
                let tx_broadcaster = test_utils::TestBroadcaster {
                        txn_broadcasted: Mutex::new(Vec::new()),
-                       blocks: Arc::new(Mutex::new(vec![(genesis_block(Network::Testnet).header, 0)])),
+                       blocks: Arc::new(Mutex::new(vec![(genesis_block(Network::Testnet), 0)])),
                };
                let fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) };
                let chain_source = test_utils::TestChainSource::new(Network::Testnet);
@@ -1934,7 +1989,7 @@ pub fn create_network<'a, 'b: 'a, 'c: 'b>(node_count: usize, cfgs: &'b Vec<NodeC
        let mut nodes = Vec::new();
        let chan_count = Rc::new(RefCell::new(0));
        let payment_count = Rc::new(RefCell::new(0));
-       let connect_style = Rc::new(RefCell::new(ConnectStyle::FullBlockViaListen));
+       let connect_style = Rc::new(RefCell::new(ConnectStyle::random_style()));
 
        for i in 0..node_count {
                let net_graph_msg_handler = NetGraphMsgHandler::new(cfgs[i].network_graph, None, cfgs[i].logger);
index d1a68a90823b2783e660788effb938c29e795e93..8eb001941f763a51fd8a6535f814f8b6e7c528a3 100644 (file)
@@ -1281,23 +1281,41 @@ fn test_duplicate_htlc_different_direction_onchain() {
        check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
        connect_blocks(&nodes[0], TEST_FINAL_CLTV - 1); // Confirm blocks until the HTLC expires
 
-       // Check we only broadcast 1 timeout tx
        let claim_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
        assert_eq!(claim_txn.len(), 8);
-       assert_eq!(claim_txn[1], claim_txn[4]);
-       assert_eq!(claim_txn[2], claim_txn[5]);
-       check_spends!(claim_txn[1], chan_1.3);
-       check_spends!(claim_txn[2], claim_txn[1]);
-       check_spends!(claim_txn[7], claim_txn[1]);
+
+       check_spends!(claim_txn[0], remote_txn[0]); // Immediate HTLC claim with preimage
+
+       check_spends!(claim_txn[1], chan_1.3); // Alternative commitment tx
+       check_spends!(claim_txn[2], claim_txn[1]); // HTLC spend in alternative commitment tx
+
+       let bump_tx = if claim_txn[1] == claim_txn[4] {
+               assert_eq!(claim_txn[1], claim_txn[4]);
+               assert_eq!(claim_txn[2], claim_txn[5]);
+
+               check_spends!(claim_txn[7], claim_txn[1]); // HTLC timeout on alternative commitment tx
+
+               check_spends!(claim_txn[3], remote_txn[0]); // HTLC timeout on broadcasted commitment tx
+               &claim_txn[3]
+       } else {
+               assert_eq!(claim_txn[1], claim_txn[3]);
+               assert_eq!(claim_txn[2], claim_txn[4]);
+
+               check_spends!(claim_txn[5], claim_txn[1]); // HTLC timeout on alternative commitment tx
+
+               check_spends!(claim_txn[7], remote_txn[0]); // HTLC timeout on broadcasted commitment tx
+
+               &claim_txn[7]
+       };
 
        assert_eq!(claim_txn[0].input.len(), 1);
-       assert_eq!(claim_txn[3].input.len(), 1);
-       assert_eq!(claim_txn[0].input[0].previous_output, claim_txn[3].input[0].previous_output);
+       assert_eq!(bump_tx.input.len(), 1);
+       assert_eq!(claim_txn[0].input[0].previous_output, bump_tx.input[0].previous_output);
 
        assert_eq!(claim_txn[0].input.len(), 1);
        assert_eq!(claim_txn[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); // HTLC 1 <--> 0, preimage tx
-       check_spends!(claim_txn[0], remote_txn[0]);
        assert_eq!(remote_txn[0].output[claim_txn[0].input[0].previous_output.vout as usize].value, 800);
+
        assert_eq!(claim_txn[6].input.len(), 1);
        assert_eq!(claim_txn[6].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT); // HTLC 0 <--> 1, timeout tx
        check_spends!(claim_txn[6], remote_txn[0]);
@@ -2356,7 +2374,8 @@ fn test_justice_tx() {
        chanmon_cfgs[1].keys_manager.disable_revocation_policy_check = true;
        let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &user_cfgs);
-       let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+       let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+       *nodes[0].connect_style.borrow_mut() = ConnectStyle::FullBlockViaListen;
        // Create some new channels:
        let chan_5 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
 
@@ -2588,11 +2607,7 @@ fn claim_htlc_outputs_single_tx() {
                expect_payment_failed!(nodes[1], payment_hash_2, true);
 
                let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
-               assert_eq!(node_txn.len(), 9);
-               // ChannelMonitor: justice tx revoked offered htlc, justice tx revoked received htlc, justice tx revoked to_local (3)
-               // ChannelManager: local commmitment + local HTLC-timeout (2)
-               // ChannelMonitor: bumped justice tx, after one increase, bumps on HTLC aren't generated not being substantial anymore, bump on revoked to_local isn't generated due to more room for expiration (2)
-               // ChannelMonitor: local commitment + local HTLC-timeout (2)
+               assert!(node_txn.len() == 9 || node_txn.len() == 10);
 
                // Check the pair local commitment and HTLC-timeout broadcast due to HTLC expiration
                assert_eq!(node_txn[0].input.len(), 1);
@@ -5298,21 +5313,30 @@ fn test_duplicate_payment_hash_one_failure_one_success() {
        let htlc_timeout_tx;
        { // Extract one of the two HTLC-Timeout transaction
                let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
-               // ChannelMonitor: timeout tx * 3, ChannelManager: local commitment tx
-               assert_eq!(node_txn.len(), 4);
+               // ChannelMonitor: timeout tx * 2-or-3, ChannelManager: local commitment tx
+               assert!(node_txn.len() == 4 || node_txn.len() == 3);
                check_spends!(node_txn[0], chan_2.3);
 
                check_spends!(node_txn[1], commitment_txn[0]);
                assert_eq!(node_txn[1].input.len(), 1);
-               check_spends!(node_txn[2], commitment_txn[0]);
-               assert_eq!(node_txn[2].input.len(), 1);
-               assert_eq!(node_txn[1].input[0].previous_output, node_txn[2].input[0].previous_output);
-               check_spends!(node_txn[3], commitment_txn[0]);
-               assert_ne!(node_txn[1].input[0].previous_output, node_txn[3].input[0].previous_output);
+
+               if node_txn.len() > 3 {
+                       check_spends!(node_txn[2], commitment_txn[0]);
+                       assert_eq!(node_txn[2].input.len(), 1);
+                       assert_eq!(node_txn[1].input[0].previous_output, node_txn[2].input[0].previous_output);
+
+                       check_spends!(node_txn[3], commitment_txn[0]);
+                       assert_ne!(node_txn[1].input[0].previous_output, node_txn[3].input[0].previous_output);
+               } else {
+                       check_spends!(node_txn[2], commitment_txn[0]);
+                       assert_ne!(node_txn[1].input[0].previous_output, node_txn[2].input[0].previous_output);
+               }
 
                assert_eq!(node_txn[1].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
                assert_eq!(node_txn[2].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
-               assert_eq!(node_txn[3].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
+               if node_txn.len() > 3 {
+                       assert_eq!(node_txn[3].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
+               }
                htlc_timeout_tx = node_txn[1].clone();
        }
 
@@ -7982,13 +8006,24 @@ fn test_bump_penalty_txn_on_remote_commitment() {
                assert_eq!(node_txn[6].input.len(), 1);
                check_spends!(node_txn[0], remote_txn[0]);
                check_spends!(node_txn[6], remote_txn[0]);
-               assert_eq!(node_txn[0].input[0].previous_output, node_txn[3].input[0].previous_output);
-               preimage_bump = node_txn[3].clone();
 
                check_spends!(node_txn[1], chan.3);
                check_spends!(node_txn[2], node_txn[1]);
-               assert_eq!(node_txn[1], node_txn[4]);
-               assert_eq!(node_txn[2], node_txn[5]);
+
+               if node_txn[0].input[0].previous_output == node_txn[3].input[0].previous_output {
+                       preimage_bump = node_txn[3].clone();
+                       check_spends!(node_txn[3], remote_txn[0]);
+
+                       assert_eq!(node_txn[1], node_txn[4]);
+                       assert_eq!(node_txn[2], node_txn[5]);
+               } else {
+                       preimage_bump = node_txn[7].clone();
+                       check_spends!(node_txn[7], remote_txn[0]);
+                       assert_eq!(node_txn[0].input[0].previous_output, node_txn[7].input[0].previous_output);
+
+                       assert_eq!(node_txn[1], node_txn[3]);
+                       assert_eq!(node_txn[2], node_txn[4]);
+               }
 
                timeout = node_txn[6].txid();
                let index = node_txn[6].input[0].previous_output.vout;
@@ -8737,10 +8772,11 @@ fn test_update_err_monitor_lockdown() {
                watchtower
        };
        let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+       let block = Block { header, txdata: vec![] };
        // Make the tx_broadcaster aware of enough blocks that it doesn't think we're violating
        // transaction lock time requirements here.
-       chanmon_cfgs[0].tx_broadcaster.blocks.lock().unwrap().resize(200, (header, 0));
-       watchtower.chain_monitor.block_connected(&Block { header, txdata: vec![] }, 200);
+       chanmon_cfgs[0].tx_broadcaster.blocks.lock().unwrap().resize(200, (block.clone(), 0));
+       watchtower.chain_monitor.block_connected(&block, 200);
 
        // Try to update ChannelMonitor
        nodes[1].node.claim_funds(preimage);
@@ -8800,10 +8836,11 @@ fn test_concurrent_monitor_claim() {
                watchtower
        };
        let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+       let block = Block { header, txdata: vec![] };
        // Make the tx_broadcaster aware of enough blocks that it doesn't think we're violating
        // transaction lock time requirements here.
-       chanmon_cfgs[0].tx_broadcaster.blocks.lock().unwrap().resize((CHAN_CONFIRM_DEPTH + 1 + TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS) as usize, (header, 0));
-       watchtower_alice.chain_monitor.block_connected(&Block { header, txdata: vec![] }, CHAN_CONFIRM_DEPTH + 1 + TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS);
+       chanmon_cfgs[0].tx_broadcaster.blocks.lock().unwrap().resize((CHAN_CONFIRM_DEPTH + 1 + TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS) as usize, (block.clone(), 0));
+       watchtower_alice.chain_monitor.block_connected(&block, CHAN_CONFIRM_DEPTH + 1 + TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS);
 
        // Watchtower Alice should have broadcast a commitment/HTLC-timeout
        {
@@ -9249,7 +9286,11 @@ fn test_duplicate_chan_id() {
 
        let funding_created = {
                let mut a_channel_lock = nodes[0].node.channel_state.lock().unwrap();
-               let mut as_chan = a_channel_lock.by_id.get_mut(&open_chan_2_msg.temporary_channel_id).unwrap();
+               // Once we call `get_outbound_funding_created` the channel has a duplicate channel_id as
+               // another channel in the ChannelManager - an invalid state. Thus, we'd panic later when we
+               // try to create another channel. Instead, we drop the channel entirely here (leaving the
+               // channelmanager in a possibly nonsense state).
+               let mut as_chan = a_channel_lock.by_id.remove(&open_chan_2_msg.temporary_channel_id).unwrap();
                let logger = test_utils::TestLogger::new();
                as_chan.get_outbound_funding_created(tx.clone(), funding_outpoint, &&logger).unwrap()
        };
@@ -9287,7 +9328,7 @@ fn test_duplicate_chan_id() {
        let events_4 = nodes[0].node.get_and_clear_pending_events();
        assert_eq!(events_4.len(), 0);
        assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1);
-       assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap()[0].txid(), funding_output.txid);
+       assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap()[0], tx);
 
        let (funding_locked, _) = create_chan_between_nodes_with_value_confirm(&nodes[0], &nodes[1], &tx);
        let (announcement, as_update, bs_update) = create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &funding_locked);
index 489626a90f22ed4a0afe103da0d54b3112abf7db..492a65537701750f7a97c56ac6cb287da395aac0 100644 (file)
@@ -20,6 +20,7 @@ use util::events::{Event, MessageSendEvent, MessageSendEventsProvider, ClosureRe
 use bitcoin::blockdata::script::Builder;
 use bitcoin::blockdata::opcodes;
 use bitcoin::secp256k1::Secp256k1;
+use bitcoin::Transaction;
 
 use prelude::*;
 
@@ -82,6 +83,17 @@ fn chanmon_fail_from_stale_commitment() {
        expect_payment_failed_with_update!(nodes[0], payment_hash, false, update_a.contents.short_channel_id, true);
 }
 
+fn test_spendable_output<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, spendable_tx: &Transaction) {
+       let mut spendable = node.chain_monitor.chain_monitor.get_and_clear_pending_events();
+       assert_eq!(spendable.len(), 1);
+       if let Event::SpendableOutputs { outputs } = spendable.pop().unwrap() {
+               assert_eq!(outputs.len(), 1);
+               let spend_tx = node.keys_manager.backing.spend_spendable_outputs(&[&outputs[0]], Vec::new(),
+                       Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script(), 253, &Secp256k1::new()).unwrap();
+               check_spends!(spend_tx, spendable_tx);
+       } else { panic!(); }
+}
+
 #[test]
 fn chanmon_claim_value_coop_close() {
        // Tests `get_claimable_balances` returns the correct values across a simple cooperative claim.
@@ -155,23 +167,9 @@ fn chanmon_claim_value_coop_close() {
        assert_eq!(Vec::<Balance>::new(),
                nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances());
 
-       let mut node_a_spendable = nodes[0].chain_monitor.chain_monitor.get_and_clear_pending_events();
-       assert_eq!(node_a_spendable.len(), 1);
-       if let Event::SpendableOutputs { outputs } = node_a_spendable.pop().unwrap() {
-               assert_eq!(outputs.len(), 1);
-               let spend_tx = nodes[0].keys_manager.backing.spend_spendable_outputs(&[&outputs[0]], Vec::new(),
-                       Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script(), 253, &Secp256k1::new()).unwrap();
-               check_spends!(spend_tx, shutdown_tx[0]);
-       }
+       test_spendable_output(&nodes[0], &shutdown_tx[0]);
+       test_spendable_output(&nodes[1], &shutdown_tx[0]);
 
-       let mut node_b_spendable = nodes[1].chain_monitor.chain_monitor.get_and_clear_pending_events();
-       assert_eq!(node_b_spendable.len(), 1);
-       if let Event::SpendableOutputs { outputs } = node_b_spendable.pop().unwrap() {
-               assert_eq!(outputs.len(), 1);
-               let spend_tx = nodes[1].keys_manager.backing.spend_spendable_outputs(&[&outputs[0]], Vec::new(),
-                       Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script(), 253, &Secp256k1::new()).unwrap();
-               check_spends!(spend_tx, shutdown_tx[0]);
-       }
        check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure);
        check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
 }
@@ -389,15 +387,7 @@ fn do_test_claim_value_force_close(prev_commitment_tx: bool) {
                }]),
                sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances()));
 
-       let mut node_a_spendable = nodes[0].chain_monitor.chain_monitor.get_and_clear_pending_events();
-       assert_eq!(node_a_spendable.len(), 1);
-       if let Event::SpendableOutputs { outputs } = node_a_spendable.pop().unwrap() {
-               assert_eq!(outputs.len(), 1);
-               let spend_tx = nodes[0].keys_manager.backing.spend_spendable_outputs(&[&outputs[0]], Vec::new(),
-                       Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script(), 253, &Secp256k1::new()).unwrap();
-               check_spends!(spend_tx, remote_txn[0]);
-       }
-
+       test_spendable_output(&nodes[0], &remote_txn[0]);
        assert!(nodes[1].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty());
 
        // After broadcasting the HTLC claim transaction, node A will still consider the HTLC
@@ -454,14 +444,7 @@ fn do_test_claim_value_force_close(prev_commitment_tx: bool) {
                nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances());
        expect_payment_failed!(nodes[0], timeout_payment_hash, true);
 
-       let mut node_a_spendable = nodes[0].chain_monitor.chain_monitor.get_and_clear_pending_events();
-       assert_eq!(node_a_spendable.len(), 1);
-       if let Event::SpendableOutputs { outputs } = node_a_spendable.pop().unwrap() {
-               assert_eq!(outputs.len(), 1);
-               let spend_tx = nodes[0].keys_manager.backing.spend_spendable_outputs(&[&outputs[0]], Vec::new(),
-                       Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script(), 253, &Secp256k1::new()).unwrap();
-               check_spends!(spend_tx, a_broadcast_txn[2]);
-       } else { panic!(); }
+       test_spendable_output(&nodes[0], &a_broadcast_txn[2]);
 
        // Node B will no longer consider the HTLC "contentious" after the HTLC claim transaction
        // confirms, and consider it simply "awaiting confirmations". Note that it has to wait for the
@@ -484,15 +467,7 @@ fn do_test_claim_value_force_close(prev_commitment_tx: bool) {
        // After reaching the commitment output CSV, we'll get a SpendableOutputs event for it and have
        // only the HTLCs claimable on node B.
        connect_blocks(&nodes[1], node_b_commitment_claimable - nodes[1].best_block_info().1);
-
-       let mut node_b_spendable = nodes[1].chain_monitor.chain_monitor.get_and_clear_pending_events();
-       assert_eq!(node_b_spendable.len(), 1);
-       if let Event::SpendableOutputs { outputs } = node_b_spendable.pop().unwrap() {
-               assert_eq!(outputs.len(), 1);
-               let spend_tx = nodes[1].keys_manager.backing.spend_spendable_outputs(&[&outputs[0]], Vec::new(),
-                       Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script(), 253, &Secp256k1::new()).unwrap();
-               check_spends!(spend_tx, remote_txn[0]);
-       }
+       test_spendable_output(&nodes[1], &remote_txn[0]);
 
        assert_eq!(sorted_vec(vec![Balance::ClaimableAwaitingConfirmations {
                        claimable_amount_satoshis: 3_000,
@@ -506,15 +481,7 @@ fn do_test_claim_value_force_close(prev_commitment_tx: bool) {
        // After reaching the claimed HTLC output CSV, we'll get a SpendableOutptus event for it and
        // have only one HTLC output left spendable.
        connect_blocks(&nodes[1], node_b_htlc_claimable - nodes[1].best_block_info().1);
-
-       let mut node_b_spendable = nodes[1].chain_monitor.chain_monitor.get_and_clear_pending_events();
-       assert_eq!(node_b_spendable.len(), 1);
-       if let Event::SpendableOutputs { outputs } = node_b_spendable.pop().unwrap() {
-               assert_eq!(outputs.len(), 1);
-               let spend_tx = nodes[1].keys_manager.backing.spend_spendable_outputs(&[&outputs[0]], Vec::new(),
-                       Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script(), 253, &Secp256k1::new()).unwrap();
-               check_spends!(spend_tx, b_broadcast_txn[0]);
-       } else { panic!(); }
+       test_spendable_output(&nodes[1], &b_broadcast_txn[0]);
 
        assert_eq!(vec![Balance::ContentiousClaimable {
                        claimable_amount_satoshis: 4_000,
@@ -710,25 +677,11 @@ fn test_balances_on_local_commitment_htlcs() {
                        confirmation_height: node_a_htlc_claimable,
                }],
                nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances());
-       let mut node_a_spendable = nodes[0].chain_monitor.chain_monitor.get_and_clear_pending_events();
-       assert_eq!(node_a_spendable.len(), 1);
-       if let Event::SpendableOutputs { outputs } = node_a_spendable.pop().unwrap() {
-               assert_eq!(outputs.len(), 1);
-               let spend_tx = nodes[0].keys_manager.backing.spend_spendable_outputs(&[&outputs[0]], Vec::new(),
-                       Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script(), 253, &Secp256k1::new()).unwrap();
-               check_spends!(spend_tx, as_txn[0]);
-       }
+       test_spendable_output(&nodes[0], &as_txn[0]);
 
        // Connect blocks until the HTLC-Timeout's CSV expires, providing us the relevant
        // `SpendableOutputs` event and removing the claimable balance entry.
        connect_blocks(&nodes[0], node_a_htlc_claimable - nodes[0].best_block_info().1);
        assert!(nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances().is_empty());
-       let mut node_a_spendable = nodes[0].chain_monitor.chain_monitor.get_and_clear_pending_events();
-       assert_eq!(node_a_spendable.len(), 1);
-       if let Event::SpendableOutputs { outputs } = node_a_spendable.pop().unwrap() {
-               assert_eq!(outputs.len(), 1);
-               let spend_tx = nodes[0].keys_manager.backing.spend_spendable_outputs(&[&outputs[0]], Vec::new(),
-                       Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script(), 253, &Secp256k1::new()).unwrap();
-               check_spends!(spend_tx, as_txn[1]);
-       }
+       test_spendable_output(&nodes[0], &as_txn[1]);
 }
index 281a2a8e977123158f5daeba45ff8666c96ec403..5aa48c32bf07686aba4adad53b9615a6cdd91bb1 100644 (file)
@@ -630,7 +630,10 @@ pub struct UnsignedChannelUpdate {
        pub fee_base_msat: u32,
        /// The amount to fee multiplier, in micro-satoshi
        pub fee_proportional_millionths: u32,
-       pub(crate) excess_data: Vec<u8>,
+       /// Excess data which was signed as part of the message but which we do not (yet) understand how
+       /// to decode. This is stored to ensure forward-compatibility as new fields are added to the
+       /// lightning gossip protocol.
+       pub excess_data: Vec<u8>,
 }
 /// A channel_update message to be sent or received from a peer
 #[derive(Clone, Debug, PartialEq)]
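With `excess_data` now public, callers can observe (and are expected to preserve) any bytes that newer gossip extensions append to a `channel_update`. For illustration only, a minimal sketch of such a caller; the helper name below is invented and does not exist in the crate:

	fn undecoded_gossip_bytes(update: &UnsignedChannelUpdate) -> usize {
		// Bytes that were signed but not understood; keeping them lets the message
		// re-serialize (and its signature re-verify) byte-for-byte.
		update.excess_data.len()
	}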
index 4fe3cef0d570c6013ba9d7f0851b88c8f5f8df3c..07e531c5b4b7e330b252414a89207ea70e698bc2 100644 (file)
@@ -367,7 +367,7 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) {
        let nodes_0_deserialized: ChannelManager<EnforcingSigner, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;
        let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
 
-       let (_, _, chan_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
+       let chan_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).2;
        let (_, _, chan_id_2, _) = create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known());
 
        // Serialize the ChannelManager prior to sending payments
@@ -484,7 +484,7 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) {
 
        if confirm_before_reload {
                let best_block = nodes[0].blocks.lock().unwrap().last().unwrap().clone();
-               nodes[0].node.best_block_updated(&best_block.0, best_block.1);
+               nodes[0].node.best_block_updated(&best_block.0.header, best_block.1);
        }
 
        // Create a new channel on which to retry the payment before we fail the payment via the
@@ -506,14 +506,19 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) {
        expect_payment_sent!(nodes[0], payment_preimage_1);
        connect_blocks(&nodes[0], TEST_FINAL_CLTV*4 + 20);
        let as_htlc_timeout_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
-       check_spends!(as_htlc_timeout_txn[2], funding_tx);
-       check_spends!(as_htlc_timeout_txn[0], as_commitment_tx);
-       check_spends!(as_htlc_timeout_txn[1], as_commitment_tx);
        assert_eq!(as_htlc_timeout_txn.len(), 3);
-       if as_htlc_timeout_txn[0].input[0].previous_output == bs_htlc_claim_txn[0].input[0].previous_output {
-               confirm_transaction(&nodes[0], &as_htlc_timeout_txn[1]);
+       let (first_htlc_timeout_tx, second_htlc_timeout_tx) = if as_htlc_timeout_txn[0] == as_commitment_tx {
+               (&as_htlc_timeout_txn[1], &as_htlc_timeout_txn[2])
        } else {
-               confirm_transaction(&nodes[0], &as_htlc_timeout_txn[0]);
+               assert_eq!(as_htlc_timeout_txn[2], as_commitment_tx);
+               (&as_htlc_timeout_txn[0], &as_htlc_timeout_txn[1])
+       };
+       check_spends!(first_htlc_timeout_tx, as_commitment_tx);
+       check_spends!(second_htlc_timeout_tx, as_commitment_tx);
+       if first_htlc_timeout_tx.input[0].previous_output == bs_htlc_claim_txn[0].input[0].previous_output {
+               confirm_transaction(&nodes[0], &second_htlc_timeout_tx);
+       } else {
+               confirm_transaction(&nodes[0], &first_htlc_timeout_tx);
        }
        nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clear();
        expect_payment_failed_conditions!(nodes[0], payment_hash, false, PaymentFailedConditions::new().mpp_parts_remain());
@@ -630,7 +635,8 @@ fn do_test_dup_htlc_onchain_fails_on_reload(persist_manager_post_event: bool, co
        let funding_txo = OutPoint { txid: funding_tx.txid(), index: 0 };
        let mon_updates: Vec<_> = chanmon_cfgs[0].persister.chain_sync_monitor_persistences.lock().unwrap()
                .get_mut(&funding_txo).unwrap().drain().collect();
-       assert_eq!(mon_updates.len(), 1);
+       // If we are using chain::Confirm instead of chain::Listen, we will get the same update twice
+       assert!(mon_updates.len() == 1 || mon_updates.len() == 2);
        assert!(nodes[0].chain_monitor.release_pending_monitor_events().is_empty());
        assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
 
@@ -646,7 +652,9 @@ fn do_test_dup_htlc_onchain_fails_on_reload(persist_manager_post_event: bool, co
        chanmon_cfgs[0].persister.set_update_ret(Ok(()));
        let mut chan_0_monitor_serialized = test_utils::TestVecWriter(Vec::new());
        get_monitor!(nodes[0], chan_id).write(&mut chan_0_monitor_serialized).unwrap();
-       nodes[0].chain_monitor.chain_monitor.channel_monitor_updated(funding_txo, mon_updates[0]).unwrap();
+       for update in mon_updates {
+               nodes[0].chain_monitor.chain_monitor.channel_monitor_updated(funding_txo, update).unwrap();
+       }
        if payment_timeout {
                expect_payment_failed!(nodes[0], payment_hash, true);
        } else {
index 4e85ae37d70125f3e4cb49a2723c4828f5efdad1..29fa84505fcefc55a255b842f05fd5eca84bed65 100644 (file)
@@ -25,8 +25,8 @@ use util::crypto::hkdf_extract_expand_twice;
 use bitcoin::hashes::hex::ToHex;
 
 /// Maximum Lightning message data length according to
-/// [BOLT-8](https://github.com/lightningnetwork/lightning-rfc/blob/v1.0/08-transport.md#lightning-message-specification)
-/// and [BOLT-1](https://github.com/lightningnetwork/lightning-rfc/blob/master/01-messaging.md#lightning-message-format):
+/// [BOLT-8](https://github.com/lightning/bolts/blob/v1.0/08-transport.md#lightning-message-specification)
+/// and [BOLT-1](https://github.com/lightning/bolts/blob/master/01-messaging.md#lightning-message-format):
 pub const LN_MAX_MSG_LEN: usize = ::core::u16::MAX as usize; // Must be equal to 65535
 
 // Sha256("Noise_XK_secp256k1_ChaChaPoly_SHA256")
index 47e2fb33e3c7321103350865f97b5f5cc5897d14..f4f52a8baf49cce4b0d1f586e92e27a72344a328 100644 (file)
@@ -11,7 +11,7 @@
 //! other behavior that exists only on private channels or with a semi-trusted counterparty (eg
 //! LSP).
 
-use chain::Watch;
+use chain::{ChannelMonitorUpdateErr, Watch};
 use chain::channelmonitor::ChannelMonitor;
 use chain::keysinterface::{Recipient, KeysInterface};
 use ln::channelmanager::{ChannelManager, ChannelManagerReadArgs, MIN_CLTV_EXPIRY_DELTA};
@@ -22,7 +22,7 @@ use ln::msgs;
 use ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, OptionalField, ChannelUpdate};
 use ln::wire::Encode;
 use util::enforcing_trait_impls::EnforcingSigner;
-use util::events::{Event, MessageSendEvent, MessageSendEventsProvider};
+use util::events::{ClosureReason, Event, MessageSendEvent, MessageSendEventsProvider};
 use util::config::UserConfig;
 use util::ser::{Writeable, ReadableArgs};
 use util::test_utils;
@@ -564,3 +564,361 @@ fn test_scid_alias_returned() {
                PaymentFailedConditions::new().blamed_scid(last_hop[0].inbound_scid_alias.unwrap())
                        .blamed_chan_closed(false).expected_htlc_error_data(0x1000|12, &err_data));
 }
+
+// Receiver must have been initialized with manually_accept_inbound_channels set to true.
+fn open_zero_conf_channel<'a, 'b, 'c, 'd>(initiator: &'a Node<'b, 'c, 'd>, receiver: &'a Node<'b, 'c, 'd>, initiator_config: Option<UserConfig>) -> bitcoin::Transaction {
+       initiator.node.create_channel(receiver.node.get_our_node_id(), 100_000, 10_001, 42, initiator_config).unwrap();
+       let open_channel = get_event_msg!(initiator, MessageSendEvent::SendOpenChannel, receiver.node.get_our_node_id());
+
+       receiver.node.handle_open_channel(&initiator.node.get_our_node_id(), InitFeatures::known(), &open_channel);
+       let events = receiver.node.get_and_clear_pending_events();
+       assert_eq!(events.len(), 1);
+       match events[0] {
+               Event::OpenChannelRequest { temporary_channel_id, .. } => {
+                       receiver.node.accept_inbound_channel_from_trusted_peer_0conf(&temporary_channel_id, &initiator.node.get_our_node_id(), 0).unwrap();
+               },
+               _ => panic!("Unexpected event"),
+       };
+
+       let mut accept_channel = get_event_msg!(receiver, MessageSendEvent::SendAcceptChannel, initiator.node.get_our_node_id());
+       assert_eq!(accept_channel.minimum_depth, 0);
+       initiator.node.handle_accept_channel(&receiver.node.get_our_node_id(), InitFeatures::known(), &accept_channel);
+
+       let (temporary_channel_id, tx, _) = create_funding_transaction(&initiator, &receiver.node.get_our_node_id(), 100_000, 42);
+       initiator.node.funding_transaction_generated(&temporary_channel_id, &receiver.node.get_our_node_id(), tx.clone()).unwrap();
+       let funding_created = get_event_msg!(initiator, MessageSendEvent::SendFundingCreated, receiver.node.get_our_node_id());
+
+       receiver.node.handle_funding_created(&initiator.node.get_our_node_id(), &funding_created);
+       check_added_monitors!(receiver, 1);
+       let bs_signed_locked = receiver.node.get_and_clear_pending_msg_events();
+       assert_eq!(bs_signed_locked.len(), 2);
+       let as_funding_locked;
+       match &bs_signed_locked[0] {
+               MessageSendEvent::SendFundingSigned { node_id, msg } => {
+                       assert_eq!(*node_id, initiator.node.get_our_node_id());
+                       initiator.node.handle_funding_signed(&receiver.node.get_our_node_id(), &msg);
+                       check_added_monitors!(initiator, 1);
+
+                       assert_eq!(initiator.tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1);
+                       assert_eq!(initiator.tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0)[0], tx);
+
+                       as_funding_locked = get_event_msg!(initiator, MessageSendEvent::SendFundingLocked, receiver.node.get_our_node_id());
+               }
+               _ => panic!("Unexpected event"),
+       }
+       match &bs_signed_locked[1] {
+               MessageSendEvent::SendFundingLocked { node_id, msg } => {
+                       assert_eq!(*node_id, initiator.node.get_our_node_id());
+                       initiator.node.handle_funding_locked(&receiver.node.get_our_node_id(), &msg);
+               }
+               _ => panic!("Unexpected event"),
+       }
+
+       receiver.node.handle_funding_locked(&initiator.node.get_our_node_id(), &as_funding_locked);
+
+       let as_channel_update = get_event_msg!(initiator, MessageSendEvent::SendChannelUpdate, receiver.node.get_our_node_id());
+       let bs_channel_update = get_event_msg!(receiver, MessageSendEvent::SendChannelUpdate, initiator.node.get_our_node_id());
+
+       initiator.node.handle_channel_update(&receiver.node.get_our_node_id(), &bs_channel_update);
+       receiver.node.handle_channel_update(&initiator.node.get_our_node_id(), &as_channel_update);
+
+       assert_eq!(initiator.node.list_usable_channels().len(), 1);
+       assert_eq!(receiver.node.list_usable_channels().len(), 1);
+
+       tx
+}
+
+#[test]
+fn test_simple_0conf_channel() {
+       // If our peer tells us they will accept our channel with 0 confs, and we funded the channel,
+       // we should trust the funding won't be double-spent (assuming `trust_own_funding_0conf` is
+       // set)!
+       // Further, if we `accept_inbound_channel_from_trusted_peer_0conf`, funding locked messages
+       // should fly immediately and the channel should be available for use as soon as they are
+       // received.
+
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+       let mut chan_config = test_default_channel_config();
+       chan_config.manually_accept_inbound_channels = true;
+
+       let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(chan_config)]);
+       let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+       open_zero_conf_channel(&nodes[0], &nodes[1], None);
+
+       send_payment(&nodes[0], &[&nodes[1]], 100_000);
+}
+
+#[test]
+fn test_0conf_channel_with_async_monitor() {
+       // Test that we properly send out funding_locked in (both inbound- and outbound-) zero-conf
+       // channels if ChannelMonitor updates return a `TemporaryFailure` during the initial channel
+       // negotiation.
+
+       let chanmon_cfgs = create_chanmon_cfgs(3);
+       let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+       let mut chan_config = test_default_channel_config();
+       chan_config.manually_accept_inbound_channels = true;
+       let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, Some(chan_config), None]);
+       let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+
+       create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 1_000_000, 0, InitFeatures::known(), InitFeatures::known());
+
+       chan_config.channel_options.announced_channel = false;
+       nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, Some(chan_config)).unwrap();
+       let open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
+
+       nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), InitFeatures::known(), &open_channel);
+       let events = nodes[1].node.get_and_clear_pending_events();
+       assert_eq!(events.len(), 1);
+       match events[0] {
+               Event::OpenChannelRequest { temporary_channel_id, .. } => {
+                       nodes[1].node.accept_inbound_channel_from_trusted_peer_0conf(&temporary_channel_id, &nodes[0].node.get_our_node_id(), 0).unwrap();
+               },
+               _ => panic!("Unexpected event"),
+       };
+
+       let mut accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
+       assert_eq!(accept_channel.minimum_depth, 0);
+       nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), InitFeatures::known(), &accept_channel);
+
+       let (temporary_channel_id, tx, funding_output) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 42);
+       nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).unwrap();
+       let funding_created = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
+
+       chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+       nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created);
+       check_added_monitors!(nodes[1], 1);
+       assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
+
+       let channel_id = funding_output.to_channel_id();
+       nodes[1].chain_monitor.complete_sole_pending_chan_update(&channel_id);
+
+       let bs_signed_locked = nodes[1].node.get_and_clear_pending_msg_events();
+       assert_eq!(bs_signed_locked.len(), 2);
+       chanmon_cfgs[0].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+
+       match &bs_signed_locked[0] {
+               MessageSendEvent::SendFundingSigned { node_id, msg } => {
+                       assert_eq!(*node_id, nodes[0].node.get_our_node_id());
+                       nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &msg);
+                       check_added_monitors!(nodes[0], 1);
+               }
+               _ => panic!("Unexpected event"),
+       }
+       match &bs_signed_locked[1] {
+               MessageSendEvent::SendFundingLocked { node_id, msg } => {
+                       assert_eq!(*node_id, nodes[0].node.get_our_node_id());
+                       nodes[0].node.handle_funding_locked(&nodes[1].node.get_our_node_id(), &msg);
+               }
+               _ => panic!("Unexpected event"),
+       }
+
+       assert!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty());
+
+       assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
+       nodes[0].chain_monitor.complete_sole_pending_chan_update(&channel_id);
+       let as_locked_update = nodes[0].node.get_and_clear_pending_msg_events();
+
+       // Note that the funding transaction is actually released when
+       // get_and_clear_pending_msg_events, above, checks for monitor events.
+       assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1);
+       assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0)[0], tx);
+
+       match &as_locked_update[0] {
+               MessageSendEvent::SendFundingLocked { node_id, msg } => {
+                       assert_eq!(*node_id, nodes[1].node.get_our_node_id());
+                       nodes[1].node.handle_funding_locked(&nodes[0].node.get_our_node_id(), &msg);
+               }
+               _ => panic!("Unexpected event"),
+       }
+       let bs_channel_update = get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id());
+
+       let as_channel_update = match &as_locked_update[1] {
+               MessageSendEvent::SendChannelUpdate { node_id, msg } => {
+                       assert_eq!(*node_id, nodes[1].node.get_our_node_id());
+                       msg.clone()
+               }
+               _ => panic!("Unexpected event"),
+       };
+
+       chanmon_cfgs[0].persister.set_update_ret(Ok(()));
+       chanmon_cfgs[1].persister.set_update_ret(Ok(()));
+
+       nodes[0].node.handle_channel_update(&nodes[1].node.get_our_node_id(), &bs_channel_update);
+       nodes[1].node.handle_channel_update(&nodes[0].node.get_our_node_id(), &as_channel_update);
+
+       assert_eq!(nodes[0].node.list_usable_channels().len(), 1);
+       assert_eq!(nodes[1].node.list_usable_channels().len(), 2);
+
+       send_payment(&nodes[0], &[&nodes[1]], 100_000);
+
+       // Now that we have useful channels, try sending a payment where we hit a temporary monitor
+       // failure before we've ever confirmed the funding transaction. This previously caused a panic.
+       let (route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], 1_000_000);
+
+       nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret)).unwrap();
+       check_added_monitors!(nodes[0], 1);
+
+       let as_send = SendEvent::from_node(&nodes[0]);
+       nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &as_send.msgs[0]);
+       nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_send.commitment_msg);
+       check_added_monitors!(nodes[1], 1);
+
+       let (bs_raa, bs_commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+       nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
+       check_added_monitors!(nodes[0], 1);
+
+       nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_commitment_signed);
+       check_added_monitors!(nodes[0], 1);
+
+       chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+       nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()));
+       check_added_monitors!(nodes[1], 1);
+       assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
+       chanmon_cfgs[1].persister.set_update_ret(Ok(()));
+       let (outpoint, _, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&bs_raa.channel_id).unwrap().clone();
+       nodes[1].chain_monitor.chain_monitor.channel_monitor_updated(outpoint, latest_update).unwrap();
+       check_added_monitors!(nodes[1], 0);
+       expect_pending_htlcs_forwardable!(nodes[1]);
+       check_added_monitors!(nodes[1], 1);
+
+       let bs_send = SendEvent::from_node(&nodes[1]);
+       nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &bs_send.msgs[0]);
+       commitment_signed_dance!(nodes[2], nodes[1], bs_send.commitment_msg, false);
+       expect_pending_htlcs_forwardable!(nodes[2]);
+       expect_payment_received!(nodes[2], payment_hash, payment_secret, 1_000_000);
+       claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage);
+
+       confirm_transaction(&nodes[0], &tx);
+       confirm_transaction(&nodes[1], &tx);
+
+       send_payment(&nodes[0], &[&nodes[1]], 100_000);
+}
+
+#[test]
+fn test_0conf_close_no_early_chan_update() {
+       // Tests that even with a public 0conf channel, we don't generate a channel_update on
+       // closing.
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+       let mut chan_config = test_default_channel_config();
+       chan_config.manually_accept_inbound_channels = true;
+
+       let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(chan_config)]);
+       let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+       // This is the default but we force it on anyway
+       chan_config.channel_options.announced_channel = true;
+       open_zero_conf_channel(&nodes[0], &nodes[1], Some(chan_config));
+
+       // We can use the channel immediately, but won't generate a channel_update until we get confs
+       send_payment(&nodes[0], &[&nodes[1]], 100_000);
+
+       nodes[0].node.force_close_all_channels();
+       check_added_monitors!(nodes[0], 1);
+       check_closed_event!(&nodes[0], 1, ClosureReason::HolderForceClosed);
+       let _ = get_err_msg!(nodes[0], nodes[1].node.get_our_node_id());
+}
+
+#[test]
+fn test_public_0conf_channel() {
+       // Tests that we will announce a public channel (after confirmation) even if it's 0conf.
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+       let mut chan_config = test_default_channel_config();
+       chan_config.manually_accept_inbound_channels = true;
+
+       let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(chan_config)]);
+       let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+       // This is the default but we force it on anyway
+       chan_config.channel_options.announced_channel = true;
+       let tx = open_zero_conf_channel(&nodes[0], &nodes[1], Some(chan_config));
+
+       // We can use the channel immediately, but we can't announce it until we get 6+ confirmations
+       send_payment(&nodes[0], &[&nodes[1]], 100_000);
+
+       let scid = confirm_transaction(&nodes[0], &tx);
+       let as_announcement_sigs = get_event_msg!(nodes[0], MessageSendEvent::SendAnnouncementSignatures, nodes[1].node.get_our_node_id());
+       assert_eq!(confirm_transaction(&nodes[1], &tx), scid);
+       let bs_announcement_sigs = get_event_msg!(nodes[1], MessageSendEvent::SendAnnouncementSignatures, nodes[0].node.get_our_node_id());
+
+       nodes[1].node.handle_announcement_signatures(&nodes[0].node.get_our_node_id(), &as_announcement_sigs);
+       nodes[0].node.handle_announcement_signatures(&nodes[1].node.get_our_node_id(), &bs_announcement_sigs);
+
+       let bs_announcement = nodes[1].node.get_and_clear_pending_msg_events();
+       assert_eq!(bs_announcement.len(), 1);
+       let announcement;
+       let bs_update;
+       match bs_announcement[0] {
+               MessageSendEvent::BroadcastChannelAnnouncement { ref msg, ref update_msg } => {
+                       announcement = msg.clone();
+                       bs_update = update_msg.clone();
+               },
+               _ => panic!("Unexpected event"),
+       };
+
+       let as_announcement = nodes[0].node.get_and_clear_pending_msg_events();
+       assert_eq!(as_announcement.len(), 1);
+       match as_announcement[0] {
+               MessageSendEvent::BroadcastChannelAnnouncement { ref msg, ref update_msg } => {
+                       assert!(announcement == *msg);
+                       assert_eq!(update_msg.contents.short_channel_id, scid);
+                       assert_eq!(update_msg.contents.short_channel_id, announcement.contents.short_channel_id);
+                       assert_eq!(update_msg.contents.short_channel_id, bs_update.contents.short_channel_id);
+               },
+               _ => panic!("Unexpected event"),
+       };
+}
+
+#[test]
+fn test_0conf_channel_reorg() {
+       // If we accept a 0conf channel, which is then confirmed, but then changes SCID in a reorg, we
+       // have to make sure we handle this correctly (or, currently, just force-close the channel).
+
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+       let mut chan_config = test_default_channel_config();
+       chan_config.manually_accept_inbound_channels = true;
+
+       let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(chan_config)]);
+       let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+       // This is the default but we force it on anyway
+       chan_config.channel_options.announced_channel = true;
+       let tx = open_zero_conf_channel(&nodes[0], &nodes[1], Some(chan_config));
+
+       // We can use the channel immediately, but we can't announce it until we get 6+ confirmations
+       send_payment(&nodes[0], &[&nodes[1]], 100_000);
+
+       mine_transaction(&nodes[0], &tx);
+       mine_transaction(&nodes[1], &tx);
+
+       // Send a payment using the channel's real SCID, which will be public in a few blocks once we
+       // can generate a channel_announcement.
+       let real_scid = nodes[0].node.list_usable_channels()[0].short_channel_id.unwrap();
+       assert_eq!(nodes[1].node.list_usable_channels()[0].short_channel_id.unwrap(), real_scid);
+
+       let (mut route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 10_000);
+       assert_eq!(route.paths[0][0].short_channel_id, real_scid);
+       send_along_route_with_secret(&nodes[0], route, &[&[&nodes[1]]], 10_000, payment_hash, payment_secret);
+       claim_payment(&nodes[0], &[&nodes[1]], payment_preimage);
+
+       disconnect_blocks(&nodes[0], 1);
+       disconnect_blocks(&nodes[1], 1);
+
+       // At this point the channel no longer has an SCID again. In the future we should likely
+       // support simply un-setting the SCID and waiting until the channel gets re-confirmed, but for
+       // now we force-close the channel here.
+       check_closed_event!(&nodes[0], 1, ClosureReason::ProcessingError {
+               err: "Funding transaction was un-confirmed. Locked at 0 confs, now have 0 confs.".to_owned()
+       });
+       check_closed_broadcast!(nodes[0], true);
+       check_closed_event!(&nodes[1], 1, ClosureReason::ProcessingError {
+               err: "Funding transaction was un-confirmed. Locked at 0 confs, now have 0 confs.".to_owned()
+       });
+       check_closed_broadcast!(nodes[1], true);
+}
index d32dae22e8f2e076e7757185ddd39384efe1467c..0025598e4c2400702503fe072b1d8b7459f58b39 100644 (file)
@@ -315,6 +315,11 @@ fn test_unconf_chan() {
        do_test_unconf_chan(false, true, false, ConnectStyle::BestBlockFirstSkippingBlocks);
        do_test_unconf_chan(true, false, false, ConnectStyle::BestBlockFirstSkippingBlocks);
        do_test_unconf_chan(false, false, false, ConnectStyle::BestBlockFirstSkippingBlocks);
+
+       do_test_unconf_chan(true, true, false, ConnectStyle::BestBlockFirstReorgsOnlyTip);
+       do_test_unconf_chan(false, true, false, ConnectStyle::BestBlockFirstReorgsOnlyTip);
+       do_test_unconf_chan(true, false, false, ConnectStyle::BestBlockFirstReorgsOnlyTip);
+       do_test_unconf_chan(false, false, false, ConnectStyle::BestBlockFirstReorgsOnlyTip);
 }
 
 #[test]
@@ -332,6 +337,11 @@ fn test_unconf_chan_via_funding_unconfirmed() {
        do_test_unconf_chan(true, false, true, ConnectStyle::BestBlockFirstSkippingBlocks);
        do_test_unconf_chan(false, false, true, ConnectStyle::BestBlockFirstSkippingBlocks);
 
+       do_test_unconf_chan(true, true, true, ConnectStyle::BestBlockFirstReorgsOnlyTip);
+       do_test_unconf_chan(false, true, true, ConnectStyle::BestBlockFirstReorgsOnlyTip);
+       do_test_unconf_chan(true, false, true, ConnectStyle::BestBlockFirstReorgsOnlyTip);
+       do_test_unconf_chan(false, false, true, ConnectStyle::BestBlockFirstReorgsOnlyTip);
+
        do_test_unconf_chan(true, true, true, ConnectStyle::FullBlockViaListen);
        do_test_unconf_chan(false, true, true, ConnectStyle::FullBlockViaListen);
        do_test_unconf_chan(true, false, true, ConnectStyle::FullBlockViaListen);
@@ -541,7 +551,9 @@ fn do_test_to_remote_after_local_detection(style: ConnectStyle) {
 fn test_to_remote_after_local_detection() {
        do_test_to_remote_after_local_detection(ConnectStyle::BestBlockFirst);
        do_test_to_remote_after_local_detection(ConnectStyle::BestBlockFirstSkippingBlocks);
+       do_test_to_remote_after_local_detection(ConnectStyle::BestBlockFirstReorgsOnlyTip);
        do_test_to_remote_after_local_detection(ConnectStyle::TransactionsFirst);
        do_test_to_remote_after_local_detection(ConnectStyle::TransactionsFirstSkippingBlocks);
+       do_test_to_remote_after_local_detection(ConnectStyle::TransactionsFirstReorgsOnlyTip);
        do_test_to_remote_after_local_detection(ConnectStyle::FullBlockViaListen);
 }
index 7abe3060fa7b666f22fc83048ca330a194e3fdfb..0e25f46d472141fb2f1e7a2f58ada2bbabc25e15 100644 (file)
@@ -16,7 +16,7 @@ use io;
 
 /// A script pubkey for shutting down a channel as defined by [BOLT #2].
 ///
-/// [BOLT #2]: https://github.com/lightningnetwork/lightning-rfc/blob/master/02-peer-protocol.md
+/// [BOLT #2]: https://github.com/lightning/bolts/blob/master/02-peer-protocol.md
 #[derive(Clone, PartialEq)]
 pub struct ShutdownScript(ShutdownScriptImpl);
 
@@ -25,7 +25,7 @@ pub struct ShutdownScript(ShutdownScriptImpl);
 pub struct InvalidShutdownScript {
        /// The script that did not meet the requirements from [BOLT #2].
        ///
-       /// [BOLT #2]: https://github.com/lightningnetwork/lightning-rfc/blob/master/02-peer-protocol.md
+       /// [BOLT #2]: https://github.com/lightning/bolts/blob/master/02-peer-protocol.md
        pub script: Script
 }
 
index 8fd5c16f36261ce3782d72bb81846b3e5b503376..a0b549452830ecd85f05ba5fa76ab543ae29aa81 100644 (file)
@@ -10,7 +10,7 @@
 //! Wire encoding/decoding for Lightning messages according to [BOLT #1], and for
 //! custom message through the [`CustomMessageReader`] trait.
 //! 
-//! [BOLT #1]: https://github.com/lightningnetwork/lightning-rfc/blob/master/01-messaging.md
+//! [BOLT #1]: https://github.com/lightning/bolts/blob/master/01-messaging.md
 
 use io;
 use ln::msgs;
index 2e0679eba79f6cbbb1473ebdf89a0e113cf8fd5e..26719cc279061f180f5617f204846922271f3f3b 100644 (file)
@@ -67,7 +67,7 @@ impl NodeId {
        pub fn from_pubkey(pubkey: &PublicKey) -> Self {
                NodeId(pubkey.serialize())
        }
-       
+
        /// Get the public key slice from this NodeId
        pub fn as_slice(&self) -> &[u8] {
                &self.0
@@ -150,7 +150,7 @@ pub struct ReadOnlyNetworkGraph<'a> {
 /// Update to the [`NetworkGraph`] based on payment failure information conveyed via the Onion
 /// return packet by a node along the route. See [BOLT #4] for details.
 ///
-/// [BOLT #4]: https://github.com/lightningnetwork/lightning-rfc/blob/master/04-onion-routing.md
+/// [BOLT #4]: https://github.com/lightning/bolts/blob/master/04-onion-routing.md
 #[derive(Clone, Debug, PartialEq)]
 pub enum NetworkUpdate {
        /// An error indicating a `channel_update` messages should be applied via
@@ -695,7 +695,7 @@ impl ChannelInfo {
                                return None;
                        }
                };
-               Some((DirectedChannelInfo { channel: self, direction }, source))
+               Some((DirectedChannelInfo::new(self, direction), source))
        }
 
        /// Returns a [`DirectedChannelInfo`] for the channel directed from the given `source` to a
@@ -710,7 +710,17 @@ impl ChannelInfo {
                                return None;
                        }
                };
-               Some((DirectedChannelInfo { channel: self, direction }, target))
+               Some((DirectedChannelInfo::new(self, direction), target))
+       }
+
+       /// Returns a [`ChannelUpdateInfo`] based on the direction implied by `channel_flags`.
+       pub fn get_directional_info(&self, channel_flags: u8) -> Option<&ChannelUpdateInfo> {
+               let direction = channel_flags & 1u8;
+               if direction == 0 {
+                       self.one_to_two.as_ref()
+               } else {
+                       self.two_to_one.as_ref()
+               }
        }
 }
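The low bit of `channel_flags` selects the direction, mirroring how the direction bit is used in the gossip `channel_update` message: 0 returns the `one_to_two` update (originated by `node_one`), 1 returns `two_to_one`. For illustration only, a hedged sketch of a caller; `direction_is_enabled` is an invented name and assumes the `enabled` field on `ChannelUpdateInfo`:

	fn direction_is_enabled(chan: &ChannelInfo, channel_flags: u8) -> bool {
		// None means no channel_update has ever been seen for that direction.
		chan.get_directional_info(channel_flags)
			.map(|dir| dir.enabled)
			.unwrap_or(false)
	}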
 
@@ -739,35 +749,53 @@ impl_writeable_tlv_based!(ChannelInfo, {
 pub struct DirectedChannelInfo<'a> {
        channel: &'a ChannelInfo,
        direction: Option<&'a ChannelUpdateInfo>,
+       htlc_maximum_msat: u64,
+       effective_capacity: EffectiveCapacity,
 }
 
 impl<'a> DirectedChannelInfo<'a> {
+       #[inline]
+       fn new(channel: &'a ChannelInfo, direction: Option<&'a ChannelUpdateInfo>) -> Self {
+               let htlc_maximum_msat = direction.and_then(|direction| direction.htlc_maximum_msat);
+               let capacity_msat = channel.capacity_sats.map(|capacity_sats| capacity_sats * 1000);
+
+               let (htlc_maximum_msat, effective_capacity) = match (htlc_maximum_msat, capacity_msat) {
+                       (Some(amount_msat), Some(capacity_msat)) => {
+                               let htlc_maximum_msat = cmp::min(amount_msat, capacity_msat);
+                               (htlc_maximum_msat, EffectiveCapacity::Total { capacity_msat })
+                       },
+                       (Some(amount_msat), None) => {
+                               (amount_msat, EffectiveCapacity::MaximumHTLC { amount_msat })
+                       },
+                       (None, Some(capacity_msat)) => {
+                               (capacity_msat, EffectiveCapacity::Total { capacity_msat })
+                       },
+                       (None, None) => (EffectiveCapacity::Unknown.as_msat(), EffectiveCapacity::Unknown),
+               };
+
+               Self {
+                       channel, direction, htlc_maximum_msat, effective_capacity
+               }
+       }
+
        /// Returns information for the channel.
        pub fn channel(&self) -> &'a ChannelInfo { self.channel }
 
        /// Returns information for the direction.
        pub fn direction(&self) -> Option<&'a ChannelUpdateInfo> { self.direction }
 
+       /// Returns the maximum HTLC amount allowed over the channel in the direction.
+       pub fn htlc_maximum_msat(&self) -> u64 {
+               self.htlc_maximum_msat
+       }
+
        /// Returns the [`EffectiveCapacity`] of the channel in the direction.
        ///
        /// This is either the total capacity from the funding transaction, if known, or the
        /// `htlc_maximum_msat` for the direction as advertised by the gossip network, if known,
-       /// whichever is smaller.
+       /// otherwise.
        pub fn effective_capacity(&self) -> EffectiveCapacity {
-               let capacity_msat = self.channel.capacity_sats.map(|capacity_sats| capacity_sats * 1000);
-               self.direction
-                       .and_then(|direction| direction.htlc_maximum_msat)
-                       .map(|max_htlc_msat| {
-                               let capacity_msat = capacity_msat.unwrap_or(u64::max_value());
-                               if max_htlc_msat < capacity_msat {
-                                       EffectiveCapacity::MaximumHTLC { amount_msat: max_htlc_msat }
-                               } else {
-                                       EffectiveCapacity::Total { capacity_msat }
-                               }
-                       })
-                       .or_else(|| capacity_msat.map(|capacity_msat|
-                                       EffectiveCapacity::Total { capacity_msat }))
-                       .unwrap_or(EffectiveCapacity::Unknown)
+               self.effective_capacity
        }
 
        /// Returns `Some` if [`ChannelUpdateInfo`] is available in the direction.
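The new constructor pre-computes the per-direction limit: when both an advertised `htlc_maximum_msat` and the funding capacity are known it keeps the smaller of the two (reporting `EffectiveCapacity::Total`), when only one is known it uses that one, and only with neither does it fall back to `EffectiveCapacity::Unknown`. A standalone sketch of that precedence, for illustration only (`usable_msat` is not a crate function); e.g. an advertised maximum of 900_000 msat on a 1_000-sat channel yields min(900_000, 1_000_000) = 900_000 msat:

	fn usable_msat(htlc_maximum_msat: Option<u64>, capacity_sats: Option<u64>) -> Option<u64> {
		let capacity_msat = capacity_sats.map(|sats| sats * 1000);
		match (htlc_maximum_msat, capacity_msat) {
			// Both known: the usable amount is capped by the funding amount.
			(Some(amt), Some(cap)) => Some(core::cmp::min(amt, cap)),
			(Some(amt), None) => Some(amt),
			(None, Some(cap)) => Some(cap),
			// Neither known: the constructor above falls back to EffectiveCapacity::Unknown.
			(None, None) => None,
		}
	}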
@@ -805,6 +833,10 @@ impl<'a> DirectedChannelInfoWithUpdate<'a> {
        /// Returns the [`EffectiveCapacity`] of the channel in the direction.
        #[inline]
        pub(super) fn effective_capacity(&self) -> EffectiveCapacity { self.inner.effective_capacity() }
+
+       /// Returns the maximum HTLC amount allowed over the channel in the direction.
+       #[inline]
+       pub(super) fn htlc_maximum_msat(&self) -> u64 { self.inner.htlc_maximum_msat() }
 }
 
 impl<'a> fmt::Debug for DirectedChannelInfoWithUpdate<'a> {
@@ -817,6 +849,7 @@ impl<'a> fmt::Debug for DirectedChannelInfoWithUpdate<'a> {
 ///
 /// While this may be smaller than the actual channel capacity, amounts greater than
 /// [`Self::as_msat`] should not be routed through the channel.
+#[derive(Clone, Copy)]
 pub enum EffectiveCapacity {
        /// The available liquidity in the channel known from being a channel counterparty, and thus a
        /// direct hop.
@@ -1132,6 +1165,83 @@ impl NetworkGraph {
                self.update_channel_from_unsigned_announcement_intern(msg, None, chain_access)
        }
 
+       /// Update a channel from partial announcement data received via rapid gossip sync.
+       ///
+       /// `timestamp: u64`: Timestamp emulating the backdated original announcement receipt (by the
+       /// rapid gossip sync server)
+       ///
+       /// All other parameters are as defined in the corresponding [`msgs::UnsignedChannelAnnouncement`] fields.
+       pub fn add_channel_from_partial_announcement(&self, short_channel_id: u64, timestamp: u64, features: ChannelFeatures, node_id_1: PublicKey, node_id_2: PublicKey) -> Result<(), LightningError> {
+               if node_id_1 == node_id_2 {
+                       return Err(LightningError{err: "Channel announcement node had a channel with itself".to_owned(), action: ErrorAction::IgnoreError});
+               };
+
+               let node_1 = NodeId::from_pubkey(&node_id_1);
+               let node_2 = NodeId::from_pubkey(&node_id_2);
+               let channel_info = ChannelInfo {
+                       features,
+                       node_one: node_1.clone(),
+                       one_to_two: None,
+                       node_two: node_2.clone(),
+                       two_to_one: None,
+                       capacity_sats: None,
+                       announcement_message: None,
+                       announcement_received_time: timestamp,
+               };
+
+               self.add_channel_between_nodes(short_channel_id, channel_info, None)
+       }
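
A rough usage sketch for the new method, roughly how a rapid gossip sync client might feed a decoded partial announcement into the graph; the module paths, setup, and literal values below are assumptions for illustration, not part of this patch:

    use bitcoin::blockdata::constants::genesis_block;
    use bitcoin::network::constants::Network;
    use bitcoin::secp256k1::{PublicKey, Secp256k1, SecretKey};
    use lightning::ln::features::ChannelFeatures;
    use lightning::routing::network_graph::NetworkGraph;

    fn main() {
        let secp_ctx = Secp256k1::new();
        let node_1 = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
        let node_2 = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());

        let graph = NetworkGraph::new(genesis_block(Network::Testnet).header.block_hash());
        // The SCID and backdated timestamp would come from the serialized rapid
        // gossip sync snapshot; the literals here are placeholders.
        graph.add_channel_from_partial_announcement(
            42, 1_600_000_000, ChannelFeatures::empty(), node_1, node_2,
        ).expect("first insertion of an unknown channel should succeed");
    }
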
+
+       fn add_channel_between_nodes(&self, short_channel_id: u64, channel_info: ChannelInfo, utxo_value: Option<u64>) -> Result<(), LightningError> {
+               let mut channels = self.channels.write().unwrap();
+               let mut nodes = self.nodes.write().unwrap();
+
+               let node_id_a = channel_info.node_one.clone();
+               let node_id_b = channel_info.node_two.clone();
+
+               match channels.entry(short_channel_id) {
+                       BtreeEntry::Occupied(mut entry) => {
+                               //TODO: because asking the blockchain if short_channel_id is valid is only optional
+                               //in the blockchain API, we need to handle it smartly here, though it's unclear
+                               //exactly how...
+                               if utxo_value.is_some() {
+                                       // Either our UTXO provider is busted, there was a reorg, or the UTXO provider
+                                       // only sometimes returns results. In any case remove the previous entry. Note
+                                       // that the spec expects us to "blacklist" the node_ids involved, but we can't
+                                       // do that because
+                                       // a) we don't *require* a UTXO provider that always returns results.
+                                       // b) we don't track UTXOs of channels we know about and remove them if they
+                                       //    get reorg'd out.
+                                       // c) it's unclear how to do so without exposing ourselves to massive DoS risk.
+                                       Self::remove_channel_in_nodes(&mut nodes, &entry.get(), short_channel_id);
+                                       *entry.get_mut() = channel_info;
+                               } else {
+                                       return Err(LightningError{err: "Already have knowledge of channel".to_owned(), action: ErrorAction::IgnoreDuplicateGossip});
+                               }
+                       },
+                       BtreeEntry::Vacant(entry) => {
+                               entry.insert(channel_info);
+                       }
+               };
+
+               for current_node_id in [node_id_a, node_id_b].iter() {
+                       match nodes.entry(current_node_id.clone()) {
+                               BtreeEntry::Occupied(node_entry) => {
+                                       node_entry.into_mut().channels.push(short_channel_id);
+                               },
+                               BtreeEntry::Vacant(node_entry) => {
+                                       node_entry.insert(NodeInfo {
+                                               channels: vec!(short_channel_id),
+                                               lowest_inbound_channel_fees: None,
+                                               announcement_info: None,
+                                       });
+                               }
+                       };
+               };
+
+               Ok(())
+       }
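
The per-node loop at the end of `add_channel_between_nodes` is a plain entry-API upsert: register the SCID under both endpoints, creating an empty node record the first time a node is seen. A simplified standalone sketch, using u64 stand-ins for node ids and a Vec stand-in for NodeInfo:

    use std::collections::btree_map::Entry;
    use std::collections::BTreeMap;

    fn register_channel(nodes: &mut BTreeMap<u64, Vec<u64>>, endpoints: [u64; 2], scid: u64) {
        for node_id in endpoints.iter() {
            match nodes.entry(*node_id) {
                // Known node: just remember one more channel.
                Entry::Occupied(entry) => entry.into_mut().push(scid),
                // New node: create its record with this channel as its only entry.
                Entry::Vacant(entry) => { entry.insert(vec![scid]); },
            }
        }
    }

    fn main() {
        let mut nodes = BTreeMap::new();
        register_channel(&mut nodes, [1, 2], 42);
        register_channel(&mut nodes, [1, 3], 43);
        assert_eq!(nodes[&1], vec![42, 43]);
    }
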
+
        fn update_channel_from_unsigned_announcement_intern<C: Deref>(
                &self, msg: &msgs::UnsignedChannelAnnouncement, full_msg: Option<&msgs::ChannelAnnouncement>, chain_access: &Option<C>
        ) -> Result<(), LightningError>
@@ -1180,65 +1290,18 @@ impl NetworkGraph {
                }
 
                let chan_info = ChannelInfo {
-                               features: msg.features.clone(),
-                               node_one: NodeId::from_pubkey(&msg.node_id_1),
-                               one_to_two: None,
-                               node_two: NodeId::from_pubkey(&msg.node_id_2),
-                               two_to_one: None,
-                               capacity_sats: utxo_value,
-                               announcement_message: if msg.excess_data.len() <= MAX_EXCESS_BYTES_FOR_RELAY
-                                       { full_msg.cloned() } else { None },
-                               announcement_received_time,
-                       };
-
-               let mut channels = self.channels.write().unwrap();
-               let mut nodes = self.nodes.write().unwrap();
-               match channels.entry(msg.short_channel_id) {
-                       BtreeEntry::Occupied(mut entry) => {
-                               //TODO: because asking the blockchain if short_channel_id is valid is only optional
-                               //in the blockchain API, we need to handle it smartly here, though it's unclear
-                               //exactly how...
-                               if utxo_value.is_some() {
-                                       // Either our UTXO provider is busted, there was a reorg, or the UTXO provider
-                                       // only sometimes returns results. In any case remove the previous entry. Note
-                                       // that the spec expects us to "blacklist" the node_ids involved, but we can't
-                                       // do that because
-                                       // a) we don't *require* a UTXO provider that always returns results.
-                                       // b) we don't track UTXOs of channels we know about and remove them if they
-                                       //    get reorg'd out.
-                                       // c) it's unclear how to do so without exposing ourselves to massive DoS risk.
-                                       Self::remove_channel_in_nodes(&mut nodes, &entry.get(), msg.short_channel_id);
-                                       *entry.get_mut() = chan_info;
-                               } else {
-                                       return Err(LightningError{err: "Already have knowledge of channel".to_owned(), action: ErrorAction::IgnoreDuplicateGossip});
-                               }
-                       },
-                       BtreeEntry::Vacant(entry) => {
-                               entry.insert(chan_info);
-                       }
+                       features: msg.features.clone(),
+                       node_one: NodeId::from_pubkey(&msg.node_id_1),
+                       one_to_two: None,
+                       node_two: NodeId::from_pubkey(&msg.node_id_2),
+                       two_to_one: None,
+                       capacity_sats: utxo_value,
+                       announcement_message: if msg.excess_data.len() <= MAX_EXCESS_BYTES_FOR_RELAY
+                               { full_msg.cloned() } else { None },
+                       announcement_received_time,
                };
 
-               macro_rules! add_channel_to_node {
-                       ( $node_id: expr ) => {
-                               match nodes.entry($node_id) {
-                                       BtreeEntry::Occupied(node_entry) => {
-                                               node_entry.into_mut().channels.push(msg.short_channel_id);
-                                       },
-                                       BtreeEntry::Vacant(node_entry) => {
-                                               node_entry.insert(NodeInfo {
-                                                       channels: vec!(msg.short_channel_id),
-                                                       lowest_inbound_channel_fees: None,
-                                                       announcement_info: None,
-                                               });
-                                       }
-                               }
-                       };
-               }
-
-               add_channel_to_node!(NodeId::from_pubkey(&msg.node_id_1));
-               add_channel_to_node!(NodeId::from_pubkey(&msg.node_id_2));
-
-               Ok(())
+               self.add_channel_between_nodes(msg.short_channel_id, chan_info, utxo_value)
        }
 
        /// Close a channel if a corresponding HTLC fail was sent.
@@ -1558,7 +1621,7 @@ mod tests {
        use ln::features::{ChannelFeatures, InitFeatures, NodeFeatures};
        use routing::network_graph::{NetGraphMsgHandler, NetworkGraph, NetworkUpdate, MAX_EXCESS_BYTES_FOR_RELAY};
        use ln::msgs::{Init, OptionalField, RoutingMessageHandler, UnsignedNodeAnnouncement, NodeAnnouncement,
-               UnsignedChannelAnnouncement, ChannelAnnouncement, UnsignedChannelUpdate, ChannelUpdate, 
+               UnsignedChannelAnnouncement, ChannelAnnouncement, UnsignedChannelUpdate, ChannelUpdate,
                ReplyChannelRange, QueryChannelRange, QueryShortChannelIds, MAX_VALUE_MSAT};
        use util::test_utils;
        use util::logger::Logger;
index 8d326b7d7f4f24cfb373f7caa9c146174760f6fd..6c8f40017811363be882133ecb09e3aaa71338c2 100644 (file)
@@ -17,9 +17,9 @@ use bitcoin::secp256k1::PublicKey;
 use ln::channelmanager::ChannelDetails;
 use ln::features::{ChannelFeatures, InvoiceFeatures, NodeFeatures};
 use ln::msgs::{DecodeError, ErrorAction, LightningError, MAX_VALUE_MSAT};
-use routing::scoring::Score;
+use routing::scoring::{ChannelUsage, Score};
 use routing::network_graph::{DirectedChannelInfoWithUpdate, EffectiveCapacity, NetworkGraph, ReadOnlyNetworkGraph, NodeId, RoutingFees};
-use util::ser::{Writeable, Readable};
+use util::ser::{Writeable, Readable, Writer};
 use util::logger::{Level, Logger};
 use util::chacha20::ChaCha20;
 
@@ -151,8 +151,8 @@ impl Readable for Route {
 
 /// Parameters needed to find a [`Route`].
 ///
-/// Passed to [`find_route`] and also provided in [`Event::PaymentPathFailed`] for retrying a failed
-/// payment path.
+/// Passed to [`find_route`] and [`build_route_from_hops`], but also provided in
+/// [`Event::PaymentPathFailed`] for retrying a failed payment path.
 ///
 /// [`Event::PaymentPathFailed`]: crate::util::events::Event::PaymentPathFailed
 #[derive(Clone, Debug)]
@@ -383,7 +383,7 @@ enum CandidateRouteHop<'a> {
 impl<'a> CandidateRouteHop<'a> {
        fn short_channel_id(&self) -> u64 {
                match self {
-                       CandidateRouteHop::FirstHop { details } => details.short_channel_id.unwrap(),
+                       CandidateRouteHop::FirstHop { details } => details.get_outbound_payment_scid().unwrap(),
                        CandidateRouteHop::PublicHop { short_channel_id, .. } => *short_channel_id,
                        CandidateRouteHop::PrivateHop { hint } => hint.short_channel_id,
                }
@@ -414,6 +414,16 @@ impl<'a> CandidateRouteHop<'a> {
                }
        }
 
+       fn htlc_maximum_msat(&self) -> u64 {
+               match self {
+                       CandidateRouteHop::FirstHop { details } => details.next_outbound_htlc_limit_msat,
+                       CandidateRouteHop::PublicHop { info, .. } => info.htlc_maximum_msat(),
+                       CandidateRouteHop::PrivateHop { hint } => {
+                               hint.htlc_maximum_msat.unwrap_or(u64::max_value())
+                       },
+               }
+       }
+
        fn fees(&self) -> RoutingFees {
                match self {
                        CandidateRouteHop::FirstHop { .. } => RoutingFees {
@@ -481,7 +491,8 @@ struct PathBuildingHop<'a> {
 
 impl<'a> core::fmt::Debug for PathBuildingHop<'a> {
        fn fmt(&self, f: &mut core::fmt::Formatter) -> Result<(), core::fmt::Error> {
-               f.debug_struct("PathBuildingHop")
+               let mut debug_struct = f.debug_struct("PathBuildingHop");
+               debug_struct
                        .field("node_id", &self.node_id)
                        .field("short_channel_id", &self.candidate.short_channel_id())
                        .field("total_fee_msat", &self.total_fee_msat)
@@ -490,8 +501,11 @@ impl<'a> core::fmt::Debug for PathBuildingHop<'a> {
                        .field("total_fee_msat - (next_hops_fee_msat + hop_use_fee_msat)", &(&self.total_fee_msat - (&self.next_hops_fee_msat + &self.hop_use_fee_msat)))
                        .field("path_penalty_msat", &self.path_penalty_msat)
                        .field("path_htlc_minimum_msat", &self.path_htlc_minimum_msat)
-                       .field("cltv_expiry_delta", &self.candidate.cltv_expiry_delta())
-                       .finish()
+                       .field("cltv_expiry_delta", &self.candidate.cltv_expiry_delta());
+               #[cfg(all(not(feature = "_bench_unstable"), any(test, fuzzing)))]
+               let debug_struct = debug_struct
+                       .field("value_contribution_msat", &self.value_contribution_msat);
+               debug_struct.finish()
        }
 }
 
@@ -662,16 +676,11 @@ pub fn find_route<L: Deref, S: Score>(
 ) -> Result<Route, LightningError>
 where L::Target: Logger {
        let network_graph = network.read_only();
-       match get_route(
-               our_node_pubkey, &route_params.payment_params, &network_graph, first_hops, route_params.final_value_msat,
-               route_params.final_cltv_expiry_delta, logger, scorer, random_seed_bytes
-       ) {
-               Ok(mut route) => {
-                       add_random_cltv_offset(&mut route, &route_params.payment_params, &network_graph, random_seed_bytes);
-                       Ok(route)
-               },
-               Err(err) => Err(err),
-       }
+       let mut route = get_route(our_node_pubkey, &route_params.payment_params, &network_graph, first_hops,
+               route_params.final_value_msat, route_params.final_cltv_expiry_delta, logger, scorer,
+               random_seed_bytes)?;
+       add_random_cltv_offset(&mut route, &route_params.payment_params, &network_graph, random_seed_bytes);
+       Ok(route)
 }
 
 pub(crate) fn get_route<L: Deref, S: Score>(
@@ -792,7 +801,7 @@ where L::Target: Logger {
                HashMap::with_capacity(if first_hops.is_some() { first_hops.as_ref().unwrap().len() } else { 0 });
        if let Some(hops) = first_hops {
                for chan in hops {
-                       if chan.short_channel_id.is_none() {
+                       if chan.get_outbound_payment_scid().is_none() {
                                panic!("first_hops should be filled in with usable channels, not pending ones");
                        }
                        if chan.counterparty.node_id == *our_node_pubkey {
@@ -830,12 +839,12 @@ where L::Target: Logger {
        let recommended_value_msat = final_value_msat * ROUTE_CAPACITY_PROVISION_FACTOR as u64;
        let mut path_value_msat = final_value_msat;
 
-       // We don't want multiple paths (as per MPP) share liquidity of the same channels.
-       // This map allows paths to be aware of the channel use by other paths in the same call.
-       // This would help to make a better path finding decisions and not "overbook" channels.
-       // It is unaware of the directions (except for `next_outbound_htlc_limit_msat` in
-       // `first_hops`).
-       let mut bookkept_channels_liquidity_available_msat = HashMap::with_capacity(network_nodes.len());
+       // Keep track of how much liquidity has been used in selected channels. Used to determine
+       // if the channel can be used by additional MPP paths or to inform path finding decisions. It is
+       // aware of direction *only* to ensure that the correct htlc_maximum_msat value is used. Hence,
+       // liquidity used in one direction will not offset any used in the opposite direction.
+       let mut used_channel_liquidities: HashMap<(u64, bool), u64> =
+               HashMap::with_capacity(network_nodes.len());
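
A standalone sketch of the bookkeeping introduced here (the names are not the patch's): usage is keyed by `(scid, source < target)`, so liquidity consumed in one direction never reduces what remains available in the other:

    use std::collections::HashMap;

    type NodeId = [u8; 33];

    fn available_msat(
        used: &HashMap<(u64, bool), u64>, scid: u64, source: &NodeId, target: &NodeId,
        htlc_maximum_msat: u64,
    ) -> u64 {
        let used_msat = used.get(&(scid, source < target)).copied().unwrap_or(0);
        htlc_maximum_msat.saturating_sub(used_msat)
    }

    fn record_use(
        used: &mut HashMap<(u64, bool), u64>, scid: u64, source: &NodeId, target: &NodeId,
        amount_msat: u64,
    ) {
        *used.entry((scid, source < target)).or_insert(0) += amount_msat;
    }

    fn main() {
        let (a, b) = ([1u8; 33], [2u8; 33]);
        let mut used = HashMap::new();
        record_use(&mut used, 42, &a, &b, 600_000);
        // The a -> b direction now has less headroom...
        assert_eq!(available_msat(&used, 42, &a, &b, 1_000_000), 400_000);
        // ...while b -> a is untouched.
        assert_eq!(available_msat(&used, 42, &b, &a, 1_000_000), 1_000_000);
    }
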
 
        // Keeping track of how much value we already collected across other paths. Helps to decide:
        // - how much a new path should be transferring (upper bound);
@@ -885,9 +894,7 @@ where L::Target: Logger {
                        // - for first and last hops early in get_route
                        if $src_node_id != $dest_node_id {
                                let short_channel_id = $candidate.short_channel_id();
-                               let available_liquidity_msat = bookkept_channels_liquidity_available_msat
-                                       .entry(short_channel_id)
-                                       .or_insert_with(|| $candidate.effective_capacity().as_msat());
+                               let htlc_maximum_msat = $candidate.htlc_maximum_msat();
 
                                // It is tricky to subtract $next_hops_fee_msat from available liquidity here.
                                // It may be misleading because we might later choose to reduce the value transferred
@@ -896,7 +903,14 @@ where L::Target: Logger {
                                // fees caused by one expensive channel, but then this channel could have been used
                                // if the amount being transferred over this path is lower.
                                // We do this for now, but this is a subject for removal.
-                               if let Some(available_value_contribution_msat) = available_liquidity_msat.checked_sub($next_hops_fee_msat) {
+                               if let Some(mut available_value_contribution_msat) = htlc_maximum_msat.checked_sub($next_hops_fee_msat) {
+                                       let used_liquidity_msat = used_channel_liquidities
+                                               .get(&(short_channel_id, $src_node_id < $dest_node_id))
+                                               .map_or(0, |used_liquidity_msat| {
+                                                       available_value_contribution_msat = available_value_contribution_msat
+                                                               .saturating_sub(*used_liquidity_msat);
+                                                       *used_liquidity_msat
+                                               });
 
                                        // Routing Fragmentation Mitigation heuristic:
                                        //
@@ -1047,9 +1061,16 @@ where L::Target: Logger {
                                                                }
                                                        }
 
-                                                       let path_penalty_msat = $next_hops_path_penalty_msat.saturating_add(
-                                                               scorer.channel_penalty_msat(short_channel_id, amount_to_transfer_over_msat,
-                                                                       *available_liquidity_msat, &$src_node_id, &$dest_node_id));
+                                                       let channel_usage = ChannelUsage {
+                                                               amount_msat: amount_to_transfer_over_msat,
+                                                               inflight_htlc_msat: used_liquidity_msat,
+                                                               effective_capacity: $candidate.effective_capacity(),
+                                                       };
+                                                       let channel_penalty_msat = scorer.channel_penalty_msat(
+                                                               short_channel_id, &$src_node_id, &$dest_node_id, channel_usage
+                                                       );
+                                                       let path_penalty_msat = $next_hops_path_penalty_msat
+                                                               .saturating_add(channel_penalty_msat);
                                                        let new_graph_node = RouteGraphNode {
                                                                node_id: $src_node_id,
                                                                lowest_fee_to_peer_through_node: total_fee_msat,
@@ -1207,9 +1228,8 @@ where L::Target: Logger {
 
        // TODO: diversify by nodes (so that all paths aren't doomed if one node is offline).
        'paths_collection: loop {
-               // For every new path, start from scratch, except
-               // bookkept_channels_liquidity_available_msat, which will improve
-               // the further iterations of path finding. Also don't erase first_hop_targets.
+               // For every new path, start from scratch, except for used_channel_liquidities, which
+               // helps to avoid reusing previously selected paths in future iterations.
                targets.clear();
                dist.clear();
                hit_minimum_limit = false;
@@ -1276,16 +1296,6 @@ where L::Target: Logger {
                                                        short_channel_id: hop.short_channel_id,
                                                })
                                                .unwrap_or_else(|| CandidateRouteHop::PrivateHop { hint: hop });
-                                       let capacity_msat = candidate.effective_capacity().as_msat();
-                                       aggregate_next_hops_path_penalty_msat = aggregate_next_hops_path_penalty_msat
-                                               .saturating_add(scorer.channel_penalty_msat(hop.short_channel_id,
-                                                       final_value_msat, capacity_msat, &source, &target));
-
-                                       aggregate_next_hops_cltv_delta = aggregate_next_hops_cltv_delta
-                                               .saturating_add(hop.cltv_expiry_delta as u32);
-
-                                       aggregate_next_hops_path_length = aggregate_next_hops_path_length
-                                               .saturating_add(1);
 
                                        if !add_entry!(candidate, source, target, aggregate_next_hops_fee_msat,
                                                                path_value_msat, aggregate_next_hops_path_htlc_minimum_msat,
@@ -1297,6 +1307,25 @@ where L::Target: Logger {
                                                hop_used = false;
                                        }
 
+                                       let used_liquidity_msat = used_channel_liquidities
+                                               .get(&(hop.short_channel_id, source < target)).copied().unwrap_or(0);
+                                       let channel_usage = ChannelUsage {
+                                               amount_msat: final_value_msat + aggregate_next_hops_fee_msat,
+                                               inflight_htlc_msat: used_liquidity_msat,
+                                               effective_capacity: candidate.effective_capacity(),
+                                       };
+                                       let channel_penalty_msat = scorer.channel_penalty_msat(
+                                               hop.short_channel_id, &source, &target, channel_usage
+                                       );
+                                       aggregate_next_hops_path_penalty_msat = aggregate_next_hops_path_penalty_msat
+                                               .saturating_add(channel_penalty_msat);
+
+                                       aggregate_next_hops_cltv_delta = aggregate_next_hops_cltv_delta
+                                               .saturating_add(hop.cltv_expiry_delta as u32);
+
+                                       aggregate_next_hops_path_length = aggregate_next_hops_path_length
+                                               .saturating_add(1);
+
                                        // Searching for a direct channel between last checked hop and first_hop_targets
                                        if let Some(first_channels) = first_hop_targets.get(&NodeId::from_pubkey(&prev_hop_id)) {
                                                for details in first_channels {
@@ -1382,7 +1411,7 @@ where L::Target: Logger {
                                        let mut features_set = false;
                                        if let Some(first_channels) = first_hop_targets.get(&ordered_hops.last().unwrap().0.node_id) {
                                                for details in first_channels {
-                                                       if details.short_channel_id.unwrap() == ordered_hops.last().unwrap().0.candidate.short_channel_id() {
+                                                       if details.get_outbound_payment_scid().unwrap() == ordered_hops.last().unwrap().0.candidate.short_channel_id() {
                                                                ordered_hops.last_mut().unwrap().1 = details.counterparty.features.to_context();
                                                                features_set = true;
                                                                break;
@@ -1448,26 +1477,30 @@ where L::Target: Logger {
                                // Remember that we used these channels so that we don't rely
                                // on the same liquidity in future paths.
                                let mut prevented_redundant_path_selection = false;
-                               for (payment_hop, _) in payment_path.hops.iter() {
-                                       let channel_liquidity_available_msat = bookkept_channels_liquidity_available_msat.get_mut(&payment_hop.candidate.short_channel_id()).unwrap();
-                                       let mut spent_on_hop_msat = value_contribution_msat;
-                                       let next_hops_fee_msat = payment_hop.next_hops_fee_msat;
-                                       spent_on_hop_msat += next_hops_fee_msat;
-                                       if spent_on_hop_msat == *channel_liquidity_available_msat {
+                               let prev_hop_iter = core::iter::once(&our_node_id)
+                                       .chain(payment_path.hops.iter().map(|(hop, _)| &hop.node_id));
+                               for (prev_hop, (hop, _)) in prev_hop_iter.zip(payment_path.hops.iter()) {
+                                       let spent_on_hop_msat = value_contribution_msat + hop.next_hops_fee_msat;
+                                       let used_liquidity_msat = used_channel_liquidities
+                                               .entry((hop.candidate.short_channel_id(), *prev_hop < hop.node_id))
+                                               .and_modify(|used_liquidity_msat| *used_liquidity_msat += spent_on_hop_msat)
+                                               .or_insert(spent_on_hop_msat);
+                                       if *used_liquidity_msat == hop.candidate.htlc_maximum_msat() {
                                                // If this path used all of this channel's available liquidity, we know
                                                // this path will not be selected again in the next loop iteration.
                                                prevented_redundant_path_selection = true;
                                        }
-                                       *channel_liquidity_available_msat -= spent_on_hop_msat;
+                                       debug_assert!(*used_liquidity_msat <= hop.candidate.htlc_maximum_msat());
                                }
                                if !prevented_redundant_path_selection {
                                        // If we weren't capped by hitting a liquidity limit on a channel in the path,
                                        // we'll probably end up picking the same path again on the next iteration.
                                        // Decrease the available liquidity of a hop in the middle of the path.
                                        let victim_scid = payment_path.hops[(payment_path.hops.len()) / 2].0.candidate.short_channel_id();
+                                       let exhausted = u64::max_value();
                                        log_trace!(logger, "Disabling channel {} for future path building iterations to avoid duplicates.", victim_scid);
-                                       let victim_liquidity = bookkept_channels_liquidity_available_msat.get_mut(&victim_scid).unwrap();
-                                       *victim_liquidity = 0;
+                                       *used_channel_liquidities.entry((victim_scid, false)).or_default() = exhausted;
+                                       *used_channel_liquidities.entry((victim_scid, true)).or_default() = exhausted;
                                }
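
When a selected path did not fully exhaust any of its channels, the same path would win again on the next iteration; the code above therefore marks one mid-path channel as fully used in both directions. A standalone sketch of that step (names are illustrative only):

    use std::collections::HashMap;

    fn disable_channel(used: &mut HashMap<(u64, bool), u64>, victim_scid: u64) {
        let exhausted = u64::max_value();
        // Mark both directions as fully used so no later path can cross this channel.
        *used.entry((victim_scid, false)).or_default() = exhausted;
        *used.entry((victim_scid, true)).or_default() = exhausted;
    }

    fn main() {
        let mut used: HashMap<(u64, bool), u64> = HashMap::new();
        disable_channel(&mut used, 42);
        assert_eq!(used[&(42, false)], u64::max_value());
        assert_eq!(used[&(42, true)], u64::max_value());
    }
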
 
                                // Track the total amount all our collected paths allow to send so that we:
@@ -1665,7 +1698,9 @@ where L::Target: Logger {
 // destination, if the remaining CLTV expiry delta exactly matches a feasible path in the network
 // graph. In order to improve privacy, this method obfuscates the CLTV expiry deltas along the
 // payment path by adding a randomized 'shadow route' offset to the final hop.
-fn add_random_cltv_offset(route: &mut Route, payment_params: &PaymentParameters, network_graph: &ReadOnlyNetworkGraph, random_seed_bytes: &[u8; 32]) {
+fn add_random_cltv_offset(route: &mut Route, payment_params: &PaymentParameters,
+       network_graph: &ReadOnlyNetworkGraph, random_seed_bytes: &[u8; 32]
+) {
        let network_channels = network_graph.channels();
        let network_nodes = network_graph.nodes();
 
@@ -1747,13 +1782,87 @@ fn add_random_cltv_offset(route: &mut Route, payment_params: &PaymentParameters,
        }
 }
 
+/// Construct a route from us (payer) to the target node (payee) via the given hops (which should
+/// exclude the payer, but include the payee). This may be useful, e.g., for probing the chosen path.
+///
+/// Re-uses logic from `find_route`, so the restrictions described there also apply here.
+pub fn build_route_from_hops<L: Deref>(
+       our_node_pubkey: &PublicKey, hops: &[PublicKey], route_params: &RouteParameters, network: &NetworkGraph,
+       logger: L, random_seed_bytes: &[u8; 32]
+) -> Result<Route, LightningError>
+where L::Target: Logger {
+       let network_graph = network.read_only();
+       let mut route = build_route_from_hops_internal(
+               our_node_pubkey, hops, &route_params.payment_params, &network_graph,
+               route_params.final_value_msat, route_params.final_cltv_expiry_delta, logger, random_seed_bytes)?;
+       add_random_cltv_offset(&mut route, &route_params.payment_params, &network_graph, random_seed_bytes);
+       Ok(route)
+}
+
+fn build_route_from_hops_internal<L: Deref>(
+       our_node_pubkey: &PublicKey, hops: &[PublicKey], payment_params: &PaymentParameters,
+       network_graph: &ReadOnlyNetworkGraph, final_value_msat: u64, final_cltv_expiry_delta: u32,
+       logger: L, random_seed_bytes: &[u8; 32]
+) -> Result<Route, LightningError> where L::Target: Logger {
+
+       struct HopScorer {
+               our_node_id: NodeId,
+               hop_ids: [Option<NodeId>; MAX_PATH_LENGTH_ESTIMATE as usize],
+       }
+
+       impl Score for HopScorer {
+               fn channel_penalty_msat(&self, _short_channel_id: u64, source: &NodeId, target: &NodeId,
+                       _usage: ChannelUsage) -> u64
+               {
+                       let mut cur_id = self.our_node_id;
+                       for i in 0..self.hop_ids.len() {
+                               if let Some(next_id) = self.hop_ids[i] {
+                                       if cur_id == *source && next_id == *target {
+                                               return 0;
+                                       }
+                                       cur_id = next_id;
+                               } else {
+                                       break;
+                               }
+                       }
+                       u64::max_value()
+               }
+
+               fn payment_path_failed(&mut self, _path: &[&RouteHop], _short_channel_id: u64) {}
+
+               fn payment_path_successful(&mut self, _path: &[&RouteHop]) {}
+       }
+
+       impl<'a> Writeable for HopScorer {
+               #[inline]
+               fn write<W: Writer>(&self, _w: &mut W) -> Result<(), io::Error> {
+                       unreachable!();
+               }
+       }
+
+       if hops.len() > MAX_PATH_LENGTH_ESTIMATE.into() {
+               return Err(LightningError{err: "Cannot build a route exceeding the maximum path length.".to_owned(), action: ErrorAction::IgnoreError});
+       }
+
+       let our_node_id = NodeId::from_pubkey(our_node_pubkey);
+       let mut hop_ids = [None; MAX_PATH_LENGTH_ESTIMATE as usize];
+       for i in 0..hops.len() {
+               hop_ids[i] = Some(NodeId::from_pubkey(&hops[i]));
+       }
+
+       let scorer = HopScorer { our_node_id, hop_ids };
+
+       get_route(our_node_pubkey, payment_params, network_graph, None, final_value_msat,
+               final_cltv_expiry_delta, logger, &scorer, random_seed_bytes)
+}
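
The `HopScorer` above steers `get_route` onto the requested path purely through penalties: an edge costs nothing if it links consecutive entries of the payer-to-payee hop list and is effectively infinite otherwise. A standalone restatement of that rule, using simplified u64 node ids rather than the crate's types:

    fn hop_sequence_penalty(our_node: u64, hops: &[u64], source: u64, target: u64) -> u64 {
        let mut cur = our_node;
        for &next in hops {
            // Zero penalty only for the edge between two consecutive requested hops.
            if cur == source && next == target {
                return 0;
            }
            cur = next;
        }
        // Any other edge is made prohibitively expensive.
        u64::max_value()
    }

    fn main() {
        // Payer is node 0; the requested path is 0 -> 7 -> 3 -> 9.
        let hops = [7, 3, 9];
        assert_eq!(hop_sequence_penalty(0, &hops, 0, 7), 0);
        assert_eq!(hop_sequence_penalty(0, &hops, 3, 9), 0);
        // An edge that is not part of the requested sequence is effectively banned.
        assert_eq!(hop_sequence_penalty(0, &hops, 7, 9), u64::max_value());
    }
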
+
 #[cfg(test)]
 mod tests {
        use routing::network_graph::{NetworkGraph, NetGraphMsgHandler, NodeId};
-       use routing::router::{get_route, add_random_cltv_offset, default_node_features,
+       use routing::router::{get_route, build_route_from_hops_internal, add_random_cltv_offset, default_node_features,
                PaymentParameters, Route, RouteHint, RouteHintHop, RouteHop, RoutingFees,
                DEFAULT_MAX_TOTAL_CLTV_EXPIRY_DELTA, MAX_PATH_LENGTH_ESTIMATE};
-       use routing::scoring::Score;
+       use routing::scoring::{ChannelUsage, Score};
        use chain::transaction::OutPoint;
        use chain::keysinterface::KeysInterface;
        use ln::features::{ChannelFeatures, InitFeatures, InvoiceFeatures, NodeFeatures};
@@ -1797,6 +1906,7 @@ mod tests {
                        funding_txo: Some(OutPoint { txid: bitcoin::Txid::from_slice(&[0; 32]).unwrap(), index: 0 }),
                        channel_type: None,
                        short_channel_id,
+                       outbound_scid_alias: None,
                        inbound_scid_alias: None,
                        channel_value_satoshis: 0,
                        user_channel_id: 0,
@@ -5145,7 +5255,7 @@ mod tests {
                fn write<W: Writer>(&self, _w: &mut W) -> Result<(), ::io::Error> { unimplemented!() }
        }
        impl Score for BadChannelScorer {
-               fn channel_penalty_msat(&self, short_channel_id: u64, _send_amt: u64, _capacity_msat: u64, _source: &NodeId, _target: &NodeId) -> u64 {
+               fn channel_penalty_msat(&self, short_channel_id: u64, _: &NodeId, _: &NodeId, _: ChannelUsage) -> u64 {
                        if short_channel_id == self.short_channel_id { u64::max_value() } else { 0 }
                }
 
@@ -5163,7 +5273,7 @@ mod tests {
        }
 
        impl Score for BadNodeScorer {
-               fn channel_penalty_msat(&self, _short_channel_id: u64, _send_amt: u64, _capacity_msat: u64, _source: &NodeId, target: &NodeId) -> u64 {
+               fn channel_penalty_msat(&self, _: u64, _: &NodeId, target: &NodeId, _: ChannelUsage) -> u64 {
                        if *target == self.node_id { u64::max_value() } else { 0 }
                }
 
@@ -5448,6 +5558,26 @@ mod tests {
                assert!(path_plausibility.iter().all(|x| *x));
        }
 
+       #[test]
+       fn builds_correct_path_from_hops() {
+               let (secp_ctx, network, _, _, logger) = build_graph();
+               let (_, our_id, _, nodes) = get_nodes(&secp_ctx);
+               let network_graph = network.read_only();
+
+               let keys_manager = test_utils::TestKeysInterface::new(&[0u8; 32], Network::Testnet);
+               let random_seed_bytes = keys_manager.get_secure_random_bytes();
+
+               let payment_params = PaymentParameters::from_node_id(nodes[3]);
+               let hops = [nodes[1], nodes[2], nodes[4], nodes[3]];
+               let route = build_route_from_hops_internal(&our_id, &hops, &payment_params,
+                        &network_graph, 100, 0, Arc::clone(&logger), &random_seed_bytes).unwrap();
+               let route_hop_pubkeys = route.paths[0].iter().map(|hop| hop.pubkey).collect::<Vec<_>>();
+               assert_eq!(hops.len(), route.paths[0].len());
+               for (idx, hop_pubkey) in hops.iter().enumerate() {
+                       assert!(*hop_pubkey == route_hop_pubkeys[idx]);
+               }
+       }
+
        #[cfg(not(feature = "no-std"))]
        pub(super) fn random_init_seed() -> u64 {
                // Because the default HashMap in std pulls OS randomness, we can use it as a (bad) RNG.
@@ -5609,6 +5739,7 @@ mod benches {
                        channel_type: None,
                        short_channel_id: Some(1),
                        inbound_scid_alias: None,
+                       outbound_scid_alias: None,
                        channel_value_satoshis: 10_000_000,
                        user_channel_id: 0,
                        balance_msat: 10_000_000,
index cffd2d90533b7290250b5a4886ad8c55fa5462fd..38e3c838425f5957fddb0a525dc15ca8cbc24eca 100644 (file)
@@ -55,7 +55,7 @@
 //! [`find_route`]: crate::routing::router::find_route
 
 use ln::msgs::DecodeError;
-use routing::network_graph::{NetworkGraph, NodeId};
+use routing::network_graph::{EffectiveCapacity, NetworkGraph, NodeId};
 use routing::router::RouteHop;
 use util::ser::{Readable, ReadableArgs, Writeable, Writer};
 use util::logger::Logger;
@@ -93,7 +93,9 @@ pub trait Score $(: $supertrait)* {
        /// such as chain data, network gossip, or invoice hints. For invoice hints, a capacity near
        /// [`u64::max_value`] is given to indicate sufficient capacity for the invoice's full amount.
        /// Thus, implementations should be overflow-safe.
-       fn channel_penalty_msat(&self, short_channel_id: u64, send_amt_msat: u64, capacity_msat: u64, source: &NodeId, target: &NodeId) -> u64;
+       fn channel_penalty_msat(
+               &self, short_channel_id: u64, source: &NodeId, target: &NodeId, usage: ChannelUsage
+       ) -> u64;
 
        /// Handles updating channel penalties after failing to route through a channel.
        fn payment_path_failed(&mut self, path: &[&RouteHop], short_channel_id: u64);
@@ -103,8 +105,10 @@ pub trait Score $(: $supertrait)* {
 }
 
 impl<S: Score, T: DerefMut<Target=S> $(+ $supertrait)*> Score for T {
-       fn channel_penalty_msat(&self, short_channel_id: u64, send_amt_msat: u64, capacity_msat: u64, source: &NodeId, target: &NodeId) -> u64 {
-               self.deref().channel_penalty_msat(short_channel_id, send_amt_msat, capacity_msat, source, target)
+       fn channel_penalty_msat(
+               &self, short_channel_id: u64, source: &NodeId, target: &NodeId, usage: ChannelUsage
+       ) -> u64 {
+               self.deref().channel_penalty_msat(short_channel_id, source, target, usage)
        }
 
        fn payment_path_failed(&mut self, path: &[&RouteHop], short_channel_id: u64) {
@@ -202,6 +206,20 @@ impl<'a, S: Writeable> Writeable for MutexGuard<'a, S> {
        }
 }
 
+/// Proposed use of a channel passed as a parameter to [`Score::channel_penalty_msat`].
+#[derive(Clone, Copy)]
+pub struct ChannelUsage {
+       /// The amount to send through the channel, denominated in millisatoshis.
+       pub amount_msat: u64,
+
+       /// Total amount, denominated in millisatoshis, already allocated to send through the channel
+       /// as part of a multi-path payment.
+       pub inflight_htlc_msat: u64,
+
+       /// The effective capacity of the channel.
+       pub effective_capacity: EffectiveCapacity,
+}
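
A minimal standalone sketch, with simplified stand-in types rather than the crate's, of how a scorer can consume this bundle: subtract the in-flight MPP amount from the effective capacity and judge the proposed amount against what remains, much like the updated scorers below do:

    struct Usage {
        amount_msat: u64,
        inflight_htlc_msat: u64,
        capacity_msat: u64, // stand-in for effective_capacity.as_msat()
    }

    fn toy_penalty_msat(usage: &Usage, base_penalty_msat: u64) -> u64 {
        let remaining = usage.capacity_msat.saturating_sub(usage.inflight_htlc_msat);
        if usage.amount_msat > remaining {
            // No realistic chance of success; make the channel unattractive.
            u64::max_value()
        } else {
            // Toy rule: scale the penalty with the share of remaining capacity consumed.
            base_penalty_msat + (usage.amount_msat * 1_000) / remaining.max(1)
        }
    }

    fn main() {
        let usage = Usage { amount_msat: 250_000, inflight_htlc_msat: 500_000, capacity_msat: 1_000_000 };
        assert_eq!(toy_penalty_msat(&usage, 500), 1_000); // 500 + 250_000 * 1_000 / 500_000
        let over = Usage { amount_msat: 600_000, ..usage };
        assert_eq!(toy_penalty_msat(&over, 500), u64::max_value());
    }
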
+
 #[derive(Clone)]
 /// [`Score`] implementation that uses a fixed penalty.
 pub struct FixedPenaltyScorer {
@@ -216,7 +234,7 @@ impl FixedPenaltyScorer {
 }
 
 impl Score for FixedPenaltyScorer {
-       fn channel_penalty_msat(&self, _: u64, _: u64, _: u64, _: &NodeId, _: &NodeId) -> u64 {
+       fn channel_penalty_msat(&self, _: u64, _: &NodeId, _: &NodeId, _: ChannelUsage) -> u64 {
                self.penalty_msat
        }
 
@@ -407,13 +425,16 @@ impl Default for ScoringParameters {
 
 impl<T: Time> Score for ScorerUsingTime<T> {
        fn channel_penalty_msat(
-               &self, short_channel_id: u64, send_amt_msat: u64, capacity_msat: u64, _source: &NodeId, _target: &NodeId
+               &self, short_channel_id: u64, _source: &NodeId, _target: &NodeId, usage: ChannelUsage
        ) -> u64 {
                let failure_penalty_msat = self.channel_failures
                        .get(&short_channel_id)
                        .map_or(0, |value| value.decayed_penalty_msat(self.params.failure_penalty_half_life));
 
                let mut penalty_msat = self.params.base_penalty_msat + failure_penalty_msat;
+               let send_amt_msat = usage.amount_msat;
+               let capacity_msat = usage.effective_capacity.as_msat()
+                       .saturating_sub(usage.inflight_htlc_msat);
                let send_1024ths = send_amt_msat.checked_mul(1024).unwrap_or(u64::max_value()) / capacity_msat;
                if send_1024ths > self.params.overuse_penalty_start_1024th as u64 {
                        penalty_msat = penalty_msat.checked_add(
@@ -880,10 +901,20 @@ impl<L: DerefMut<Target = u64>, T: Time, U: DerefMut<Target = T>> DirectedChanne
 
 impl<G: Deref<Target = NetworkGraph>, L: Deref, T: Time> Score for ProbabilisticScorerUsingTime<G, L, T> where L::Target: Logger {
        fn channel_penalty_msat(
-               &self, short_channel_id: u64, amount_msat: u64, capacity_msat: u64, source: &NodeId,
-               target: &NodeId
+               &self, short_channel_id: u64, source: &NodeId, target: &NodeId, usage: ChannelUsage
        ) -> u64 {
+               if let EffectiveCapacity::ExactLiquidity { liquidity_msat } = usage.effective_capacity {
+                       if usage.amount_msat > liquidity_msat {
+                               return u64::max_value();
+                       } else {
+                               return self.params.base_penalty_msat;
+                       };
+               }
+
                let liquidity_offset_half_life = self.params.liquidity_offset_half_life;
+               let amount_msat = usage.amount_msat;
+               let capacity_msat = usage.effective_capacity.as_msat()
+                       .saturating_sub(usage.inflight_htlc_msat);
                self.channel_liquidities
                        .get(&short_channel_id)
                        .unwrap_or(&ChannelLiquidity::new())
@@ -1338,8 +1369,8 @@ mod tests {
 
        use ln::features::{ChannelFeatures, NodeFeatures};
        use ln::msgs::{ChannelAnnouncement, ChannelUpdate, OptionalField, UnsignedChannelAnnouncement, UnsignedChannelUpdate};
-       use routing::scoring::Score;
-       use routing::network_graph::{NetworkGraph, NodeId};
+       use routing::scoring::{ChannelUsage, Score};
+       use routing::network_graph::{EffectiveCapacity, NetworkGraph, NodeId};
        use routing::router::RouteHop;
        use util::ser::{Readable, ReadableArgs, Writeable};
        use util::test_utils::TestLogger;
@@ -1392,10 +1423,13 @@ mod tests {
                });
                let source = source_node_id();
                let target = target_node_id();
-               assert_eq!(scorer.channel_penalty_msat(42, 1, 1, &source, &target), 1_000);
+               let usage = ChannelUsage {
+                       amount_msat: 1, inflight_htlc_msat: 0, effective_capacity: EffectiveCapacity::Unknown
+               };
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 1_000);
 
                SinceEpoch::advance(Duration::from_secs(1));
-               assert_eq!(scorer.channel_penalty_msat(42, 1, 1, &source, &target), 1_000);
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 1_000);
        }
 
        #[test]
@@ -1409,16 +1443,19 @@ mod tests {
                });
                let source = source_node_id();
                let target = target_node_id();
-               assert_eq!(scorer.channel_penalty_msat(42, 1, 1, &source, &target), 1_000);
+               let usage = ChannelUsage {
+                       amount_msat: 1, inflight_htlc_msat: 0, effective_capacity: EffectiveCapacity::Unknown
+               };
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 1_000);
 
                scorer.payment_path_failed(&[], 42);
-               assert_eq!(scorer.channel_penalty_msat(42, 1, 1, &source, &target), 1_064);
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 1_064);
 
                scorer.payment_path_failed(&[], 42);
-               assert_eq!(scorer.channel_penalty_msat(42, 1, 1, &source, &target), 1_128);
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 1_128);
 
                scorer.payment_path_failed(&[], 42);
-               assert_eq!(scorer.channel_penalty_msat(42, 1, 1, &source, &target), 1_192);
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 1_192);
        }
 
        #[test]
@@ -1432,25 +1469,28 @@ mod tests {
                });
                let source = source_node_id();
                let target = target_node_id();
-               assert_eq!(scorer.channel_penalty_msat(42, 1, 1, &source, &target), 1_000);
+               let usage = ChannelUsage {
+                       amount_msat: 1, inflight_htlc_msat: 0, effective_capacity: EffectiveCapacity::Unknown
+               };
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 1_000);
 
                scorer.payment_path_failed(&[], 42);
-               assert_eq!(scorer.channel_penalty_msat(42, 1, 1, &source, &target), 1_512);
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 1_512);
 
                SinceEpoch::advance(Duration::from_secs(9));
-               assert_eq!(scorer.channel_penalty_msat(42, 1, 1, &source, &target), 1_512);
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 1_512);
 
                SinceEpoch::advance(Duration::from_secs(1));
-               assert_eq!(scorer.channel_penalty_msat(42, 1, 1, &source, &target), 1_256);
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 1_256);
 
                SinceEpoch::advance(Duration::from_secs(10 * 8));
-               assert_eq!(scorer.channel_penalty_msat(42, 1, 1, &source, &target), 1_001);
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 1_001);
 
                SinceEpoch::advance(Duration::from_secs(10));
-               assert_eq!(scorer.channel_penalty_msat(42, 1, 1, &source, &target), 1_000);
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 1_000);
 
                SinceEpoch::advance(Duration::from_secs(10));
-               assert_eq!(scorer.channel_penalty_msat(42, 1, 1, &source, &target), 1_000);
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 1_000);
        }
 
        #[test]
@@ -1464,18 +1504,21 @@ mod tests {
                });
                let source = source_node_id();
                let target = target_node_id();
-               assert_eq!(scorer.channel_penalty_msat(42, 1, 1, &source, &target), 1_000);
+               let usage = ChannelUsage {
+                       amount_msat: 1, inflight_htlc_msat: 0, effective_capacity: EffectiveCapacity::Unknown
+               };
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 1_000);
 
                scorer.payment_path_failed(&[], 42);
-               assert_eq!(scorer.channel_penalty_msat(42, 1, 1, &source, &target), 1_512);
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 1_512);
 
                // An unchecked right shift 64 bits or more in ChannelFailure::decayed_penalty_msat would
                // cause an overflow.
                SinceEpoch::advance(Duration::from_secs(10 * 64));
-               assert_eq!(scorer.channel_penalty_msat(42, 1, 1, &source, &target), 1_000);
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 1_000);
 
                SinceEpoch::advance(Duration::from_secs(10));
-               assert_eq!(scorer.channel_penalty_msat(42, 1, 1, &source, &target), 1_000);
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 1_000);
        }
 
        #[test]
@@ -1489,19 +1532,22 @@ mod tests {
                });
                let source = source_node_id();
                let target = target_node_id();
-               assert_eq!(scorer.channel_penalty_msat(42, 1, 1, &source, &target), 1_000);
+               let usage = ChannelUsage {
+                       amount_msat: 1, inflight_htlc_msat: 0, effective_capacity: EffectiveCapacity::Unknown
+               };
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 1_000);
 
                scorer.payment_path_failed(&[], 42);
-               assert_eq!(scorer.channel_penalty_msat(42, 1, 1, &source, &target), 1_512);
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 1_512);
 
                SinceEpoch::advance(Duration::from_secs(10));
-               assert_eq!(scorer.channel_penalty_msat(42, 1, 1, &source, &target), 1_256);
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 1_256);
 
                scorer.payment_path_failed(&[], 42);
-               assert_eq!(scorer.channel_penalty_msat(42, 1, 1, &source, &target), 1_768);
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 1_768);
 
                SinceEpoch::advance(Duration::from_secs(10));
-               assert_eq!(scorer.channel_penalty_msat(42, 1, 1, &source, &target), 1_384);
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 1_384);
        }
 
        #[test]
@@ -1515,13 +1561,16 @@ mod tests {
                });
                let source = source_node_id();
                let target = target_node_id();
-               assert_eq!(scorer.channel_penalty_msat(42, 1, 1, &source, &target), 1_000);
+               let usage = ChannelUsage {
+                       amount_msat: 1, inflight_htlc_msat: 0, effective_capacity: EffectiveCapacity::Unknown
+               };
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 1_000);
 
                scorer.payment_path_failed(&[], 42);
-               assert_eq!(scorer.channel_penalty_msat(42, 1, 1, &source, &target), 1_512);
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 1_512);
 
                SinceEpoch::advance(Duration::from_secs(10));
-               assert_eq!(scorer.channel_penalty_msat(42, 1, 1, &source, &target), 1_256);
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 1_256);
 
                let hop = RouteHop {
                        pubkey: PublicKey::from_slice(target.as_slice()).unwrap(),
@@ -1532,10 +1581,10 @@ mod tests {
                        cltv_expiry_delta: 18,
                };
                scorer.payment_path_successful(&[&hop]);
-               assert_eq!(scorer.channel_penalty_msat(42, 1, 1, &source, &target), 1_128);
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 1_128);
 
                SinceEpoch::advance(Duration::from_secs(10));
-               assert_eq!(scorer.channel_penalty_msat(42, 1, 1, &source, &target), 1_064);
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 1_064);
        }
 
        #[test]
@@ -1549,22 +1598,25 @@ mod tests {
                });
                let source = source_node_id();
                let target = target_node_id();
+               let usage = ChannelUsage {
+                       amount_msat: 1, inflight_htlc_msat: 0, effective_capacity: EffectiveCapacity::Unknown
+               };
 
                scorer.payment_path_failed(&[], 42);
-               assert_eq!(scorer.channel_penalty_msat(42, 1, 1, &source, &target), 1_512);
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 1_512);
 
                SinceEpoch::advance(Duration::from_secs(10));
-               assert_eq!(scorer.channel_penalty_msat(42, 1, 1, &source, &target), 1_256);
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 1_256);
 
                scorer.payment_path_failed(&[], 43);
-               assert_eq!(scorer.channel_penalty_msat(43, 1, 1, &source, &target), 1_512);
+               assert_eq!(scorer.channel_penalty_msat(43, &source, &target, usage), 1_512);
 
                let mut serialized_scorer = Vec::new();
                scorer.write(&mut serialized_scorer).unwrap();
 
                let deserialized_scorer = <Scorer>::read(&mut io::Cursor::new(&serialized_scorer)).unwrap();
-               assert_eq!(deserialized_scorer.channel_penalty_msat(42, 1, 1, &source, &target), 1_256);
-               assert_eq!(deserialized_scorer.channel_penalty_msat(43, 1, 1, &source, &target), 1_512);
+               assert_eq!(deserialized_scorer.channel_penalty_msat(42, &source, &target, usage), 1_256);
+               assert_eq!(deserialized_scorer.channel_penalty_msat(43, &source, &target, usage), 1_512);
        }
 
        #[test]
@@ -1578,9 +1630,12 @@ mod tests {
                });
                let source = source_node_id();
                let target = target_node_id();
+               let usage = ChannelUsage {
+                       amount_msat: 1, inflight_htlc_msat: 0, effective_capacity: EffectiveCapacity::Unknown
+               };
 
                scorer.payment_path_failed(&[], 42);
-               assert_eq!(scorer.channel_penalty_msat(42, 1, 1, &source, &target), 1_512);
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 1_512);
 
                let mut serialized_scorer = Vec::new();
                scorer.write(&mut serialized_scorer).unwrap();
@@ -1588,10 +1643,10 @@ mod tests {
                SinceEpoch::advance(Duration::from_secs(10));
 
                let deserialized_scorer = <Scorer>::read(&mut io::Cursor::new(&serialized_scorer)).unwrap();
-               assert_eq!(deserialized_scorer.channel_penalty_msat(42, 1, 1, &source, &target), 1_256);
+               assert_eq!(deserialized_scorer.channel_penalty_msat(42, &source, &target, usage), 1_256);
 
                SinceEpoch::advance(Duration::from_secs(10));
-               assert_eq!(deserialized_scorer.channel_penalty_msat(42, 1, 1, &source, &target), 1_128);
+               assert_eq!(deserialized_scorer.channel_penalty_msat(42, &source, &target, usage), 1_128);
        }
 
        #[test]
@@ -1606,11 +1661,24 @@ mod tests {
                let source = source_node_id();
                let target = target_node_id();
 
-               assert_eq!(scorer.channel_penalty_msat(42, 1_000, 1_024_000, &source, &target), 0);
-               assert_eq!(scorer.channel_penalty_msat(42, 256_999, 1_024_000, &source, &target), 0);
-               assert_eq!(scorer.channel_penalty_msat(42, 257_000, 1_024_000, &source, &target), 100);
-               assert_eq!(scorer.channel_penalty_msat(42, 258_000, 1_024_000, &source, &target), 200);
-               assert_eq!(scorer.channel_penalty_msat(42, 512_000, 1_024_000, &source, &target), 256 * 100);
+               let usage = ChannelUsage {
+                       amount_msat: 1_000,
+                       inflight_htlc_msat: 0,
+                       effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024_000 },
+               };
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 0);
+
+               let usage = ChannelUsage { amount_msat: 256_999, ..usage };
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 0);
+
+               let usage = ChannelUsage { amount_msat: 257_000, ..usage };
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 100);
+
+               let usage = ChannelUsage { amount_msat: 258_000, ..usage };
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 200);
+
+               let usage = ChannelUsage { amount_msat: 512_000, ..usage };
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 256 * 100);
        }
 
        // `ProbabilisticScorer` tests
@@ -1943,18 +2011,37 @@ mod tests {
                let source = source_node_id();
                let target = target_node_id();
 
-               assert_eq!(scorer.channel_penalty_msat(42, 1_024, 1_024_000, &source, &target), 0);
-               assert_eq!(scorer.channel_penalty_msat(42, 10_240, 1_024_000, &source, &target), 0);
-               assert_eq!(scorer.channel_penalty_msat(42, 102_400, 1_024_000, &source, &target), 47);
-               assert_eq!(scorer.channel_penalty_msat(42, 1_024_000, 1_024_000, &source, &target), 2_000);
-
-               assert_eq!(scorer.channel_penalty_msat(42, 128, 1_024, &source, &target), 58);
-               assert_eq!(scorer.channel_penalty_msat(42, 256, 1_024, &source, &target), 125);
-               assert_eq!(scorer.channel_penalty_msat(42, 374, 1_024, &source, &target), 198);
-               assert_eq!(scorer.channel_penalty_msat(42, 512, 1_024, &source, &target), 300);
-               assert_eq!(scorer.channel_penalty_msat(42, 640, 1_024, &source, &target), 425);
-               assert_eq!(scorer.channel_penalty_msat(42, 768, 1_024, &source, &target), 602);
-               assert_eq!(scorer.channel_penalty_msat(42, 896, 1_024, &source, &target), 902);
+               let usage = ChannelUsage {
+                       amount_msat: 1_024,
+                       inflight_htlc_msat: 0,
+                       effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024_000 },
+               };
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 0);
+               let usage = ChannelUsage { amount_msat: 10_240, ..usage };
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 0);
+               let usage = ChannelUsage { amount_msat: 102_400, ..usage };
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 47);
+               let usage = ChannelUsage { amount_msat: 1_024_000, ..usage };
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 2_000);
+
+               let usage = ChannelUsage {
+                       amount_msat: 128,
+                       inflight_htlc_msat: 0,
+                       effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024 },
+               };
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 58);
+               let usage = ChannelUsage { amount_msat: 256, ..usage };
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 125);
+               let usage = ChannelUsage { amount_msat: 374, ..usage };
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 198);
+               let usage = ChannelUsage { amount_msat: 512, ..usage };
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 300);
+               let usage = ChannelUsage { amount_msat: 640, ..usage };
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 425);
+               let usage = ChannelUsage { amount_msat: 768, ..usage };
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 602);
+               let usage = ChannelUsage { amount_msat: 896, ..usage };
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 902);
        }
 
        #[test]
@@ -1974,10 +2061,17 @@ mod tests {
                let source = source_node_id();
                let target = target_node_id();
 
-               assert_eq!(scorer.channel_penalty_msat(42, 39, 100, &source, &target), 0);
-               assert_ne!(scorer.channel_penalty_msat(42, 50, 100, &source, &target), 0);
-               assert_ne!(scorer.channel_penalty_msat(42, 50, 100, &source, &target), u64::max_value());
-               assert_eq!(scorer.channel_penalty_msat(42, 61, 100, &source, &target), u64::max_value());
+               let usage = ChannelUsage {
+                       amount_msat: 39,
+                       inflight_htlc_msat: 0,
+                       effective_capacity: EffectiveCapacity::Total { capacity_msat: 100 },
+               };
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 0);
+               let usage = ChannelUsage { amount_msat: 50, ..usage };
+               assert_ne!(scorer.channel_penalty_msat(42, &source, &target, usage), 0);
+               assert_ne!(scorer.channel_penalty_msat(42, &source, &target, usage), u64::max_value());
+               let usage = ChannelUsage { amount_msat: 61, ..usage };
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), u64::max_value());
        }
 
        #[test]
@@ -1991,16 +2085,21 @@ mod tests {
                let mut scorer = ProbabilisticScorer::new(params, &network_graph, &logger);
                let sender = sender_node_id();
                let source = source_node_id();
+               let usage = ChannelUsage {
+                       amount_msat: 500,
+                       inflight_htlc_msat: 0,
+                       effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_000 },
+               };
                let failed_path = payment_path_for_amount(500);
                let successful_path = payment_path_for_amount(200);
 
-               assert_eq!(scorer.channel_penalty_msat(41, 500, 1_000, &sender, &source), 301);
+               assert_eq!(scorer.channel_penalty_msat(41, &sender, &source, usage), 301);
 
                scorer.payment_path_failed(&failed_path.iter().collect::<Vec<_>>(), 41);
-               assert_eq!(scorer.channel_penalty_msat(41, 500, 1_000, &sender, &source), 301);
+               assert_eq!(scorer.channel_penalty_msat(41, &sender, &source, usage), 301);
 
                scorer.payment_path_successful(&successful_path.iter().collect::<Vec<_>>());
-               assert_eq!(scorer.channel_penalty_msat(41, 500, 1_000, &sender, &source), 301);
+               assert_eq!(scorer.channel_penalty_msat(41, &sender, &source, usage), 301);
        }
 
        #[test]
@@ -2016,15 +2115,25 @@ mod tests {
                let target = target_node_id();
                let path = payment_path_for_amount(500);
 
-               assert_eq!(scorer.channel_penalty_msat(42, 250, 1_000, &source, &target), 128);
-               assert_eq!(scorer.channel_penalty_msat(42, 500, 1_000, &source, &target), 301);
-               assert_eq!(scorer.channel_penalty_msat(42, 750, 1_000, &source, &target), 602);
+               let usage = ChannelUsage {
+                       amount_msat: 250,
+                       inflight_htlc_msat: 0,
+                       effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_000 },
+               };
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 128);
+               let usage = ChannelUsage { amount_msat: 500, ..usage };
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 301);
+               let usage = ChannelUsage { amount_msat: 750, ..usage };
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 602);
 
                scorer.payment_path_failed(&path.iter().collect::<Vec<_>>(), 43);
 
-               assert_eq!(scorer.channel_penalty_msat(42, 250, 1_000, &source, &target), 0);
-               assert_eq!(scorer.channel_penalty_msat(42, 500, 1_000, &source, &target), 0);
-               assert_eq!(scorer.channel_penalty_msat(42, 750, 1_000, &source, &target), 300);
+               let usage = ChannelUsage { amount_msat: 250, ..usage };
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 0);
+               let usage = ChannelUsage { amount_msat: 500, ..usage };
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 0);
+               let usage = ChannelUsage { amount_msat: 750, ..usage };
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 300);
        }
 
        #[test]
@@ -2040,15 +2149,25 @@ mod tests {
                let target = target_node_id();
                let path = payment_path_for_amount(500);
 
-               assert_eq!(scorer.channel_penalty_msat(42, 250, 1_000, &source, &target), 128);
-               assert_eq!(scorer.channel_penalty_msat(42, 500, 1_000, &source, &target), 301);
-               assert_eq!(scorer.channel_penalty_msat(42, 750, 1_000, &source, &target), 602);
+               let usage = ChannelUsage {
+                       amount_msat: 250,
+                       inflight_htlc_msat: 0,
+                       effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_000 },
+               };
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 128);
+               let usage = ChannelUsage { amount_msat: 500, ..usage };
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 301);
+               let usage = ChannelUsage { amount_msat: 750, ..usage };
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 602);
 
                scorer.payment_path_failed(&path.iter().collect::<Vec<_>>(), 42);
 
-               assert_eq!(scorer.channel_penalty_msat(42, 250, 1_000, &source, &target), 300);
-               assert_eq!(scorer.channel_penalty_msat(42, 500, 1_000, &source, &target), u64::max_value());
-               assert_eq!(scorer.channel_penalty_msat(42, 750, 1_000, &source, &target), u64::max_value());
+               let usage = ChannelUsage { amount_msat: 250, ..usage };
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 300);
+               let usage = ChannelUsage { amount_msat: 500, ..usage };
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), u64::max_value());
+               let usage = ChannelUsage { amount_msat: 750, ..usage };
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), u64::max_value());
        }
 
        #[test]
@@ -2064,17 +2183,22 @@ mod tests {
                let source = source_node_id();
                let target = target_node_id();
                let recipient = recipient_node_id();
+               let usage = ChannelUsage {
+                       amount_msat: 250,
+                       inflight_htlc_msat: 0,
+                       effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_000 },
+               };
                let path = payment_path_for_amount(500);
 
-               assert_eq!(scorer.channel_penalty_msat(41, 250, 1_000, &sender, &source), 128);
-               assert_eq!(scorer.channel_penalty_msat(42, 250, 1_000, &source, &target), 128);
-               assert_eq!(scorer.channel_penalty_msat(43, 250, 1_000, &target, &recipient), 128);
+               assert_eq!(scorer.channel_penalty_msat(41, &sender, &source, usage), 128);
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 128);
+               assert_eq!(scorer.channel_penalty_msat(43, &target, &recipient, usage), 128);
 
                scorer.payment_path_successful(&path.iter().collect::<Vec<_>>());
 
-               assert_eq!(scorer.channel_penalty_msat(41, 250, 1_000, &sender, &source), 128);
-               assert_eq!(scorer.channel_penalty_msat(42, 250, 1_000, &source, &target), 300);
-               assert_eq!(scorer.channel_penalty_msat(43, 250, 1_000, &target, &recipient), 300);
+               assert_eq!(scorer.channel_penalty_msat(41, &sender, &source, usage), 128);
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 300);
+               assert_eq!(scorer.channel_penalty_msat(43, &target, &recipient, usage), 300);
        }
 
        #[test]
@@ -2090,44 +2214,70 @@ mod tests {
                let source = source_node_id();
                let target = target_node_id();
 
-               assert_eq!(scorer.channel_penalty_msat(42, 0, 1_024, &source, &target), 0);
-               assert_eq!(scorer.channel_penalty_msat(42, 1_024, 1_024, &source, &target), 2_000);
+               let usage = ChannelUsage {
+                       amount_msat: 0,
+                       inflight_htlc_msat: 0,
+                       effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024 },
+               };
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 0);
+               let usage = ChannelUsage { amount_msat: 1_024, ..usage };
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 2_000);
 
                scorer.payment_path_failed(&payment_path_for_amount(768).iter().collect::<Vec<_>>(), 42);
                scorer.payment_path_failed(&payment_path_for_amount(128).iter().collect::<Vec<_>>(), 43);
 
-               assert_eq!(scorer.channel_penalty_msat(42, 128, 1_024, &source, &target), 0);
-               assert_eq!(scorer.channel_penalty_msat(42, 256, 1_024, &source, &target), 93);
-               assert_eq!(scorer.channel_penalty_msat(42, 768, 1_024, &source, &target), 1_479);
-               assert_eq!(scorer.channel_penalty_msat(42, 896, 1_024, &source, &target), u64::max_value());
+               let usage = ChannelUsage { amount_msat: 128, ..usage };
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 0);
+               let usage = ChannelUsage { amount_msat: 256, ..usage };
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 93);
+               let usage = ChannelUsage { amount_msat: 768, ..usage };
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 1_479);
+               let usage = ChannelUsage { amount_msat: 896, ..usage };
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), u64::max_value());
 
                SinceEpoch::advance(Duration::from_secs(9));
-               assert_eq!(scorer.channel_penalty_msat(42, 128, 1_024, &source, &target), 0);
-               assert_eq!(scorer.channel_penalty_msat(42, 256, 1_024, &source, &target), 93);
-               assert_eq!(scorer.channel_penalty_msat(42, 768, 1_024, &source, &target), 1_479);
-               assert_eq!(scorer.channel_penalty_msat(42, 896, 1_024, &source, &target), u64::max_value());
+               let usage = ChannelUsage { amount_msat: 128, ..usage };
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 0);
+               let usage = ChannelUsage { amount_msat: 256, ..usage };
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 93);
+               let usage = ChannelUsage { amount_msat: 768, ..usage };
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 1_479);
+               let usage = ChannelUsage { amount_msat: 896, ..usage };
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), u64::max_value());
 
                SinceEpoch::advance(Duration::from_secs(1));
-               assert_eq!(scorer.channel_penalty_msat(42, 64, 1_024, &source, &target), 0);
-               assert_eq!(scorer.channel_penalty_msat(42, 128, 1_024, &source, &target), 34);
-               assert_eq!(scorer.channel_penalty_msat(42, 896, 1_024, &source, &target), 1_970);
-               assert_eq!(scorer.channel_penalty_msat(42, 960, 1_024, &source, &target), u64::max_value());
+               let usage = ChannelUsage { amount_msat: 64, ..usage };
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 0);
+               let usage = ChannelUsage { amount_msat: 128, ..usage };
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 34);
+               let usage = ChannelUsage { amount_msat: 896, ..usage };
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 1_970);
+               let usage = ChannelUsage { amount_msat: 960, ..usage };
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), u64::max_value());
 
                // Fully decay liquidity lower bound.
                SinceEpoch::advance(Duration::from_secs(10 * 7));
-               assert_eq!(scorer.channel_penalty_msat(42, 0, 1_024, &source, &target), 0);
-               assert_eq!(scorer.channel_penalty_msat(42, 1, 1_024, &source, &target), 0);
-               assert_eq!(scorer.channel_penalty_msat(42, 1_023, 1_024, &source, &target), 2_000);
-               assert_eq!(scorer.channel_penalty_msat(42, 1_024, 1_024, &source, &target), 2_000);
+               let usage = ChannelUsage { amount_msat: 0, ..usage };
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 0);
+               let usage = ChannelUsage { amount_msat: 1, ..usage };
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 0);
+               let usage = ChannelUsage { amount_msat: 1_023, ..usage };
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 2_000);
+               let usage = ChannelUsage { amount_msat: 1_024, ..usage };
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 2_000);
 
                // Fully decay liquidity upper bound.
                SinceEpoch::advance(Duration::from_secs(10));
-               assert_eq!(scorer.channel_penalty_msat(42, 0, 1_024, &source, &target), 0);
-               assert_eq!(scorer.channel_penalty_msat(42, 1_024, 1_024, &source, &target), 2_000);
+               let usage = ChannelUsage { amount_msat: 0, ..usage };
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 0);
+               let usage = ChannelUsage { amount_msat: 1_024, ..usage };
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 2_000);
 
                SinceEpoch::advance(Duration::from_secs(10));
-               assert_eq!(scorer.channel_penalty_msat(42, 0, 1_024, &source, &target), 0);
-               assert_eq!(scorer.channel_penalty_msat(42, 1_024, 1_024, &source, &target), 2_000);
+               let usage = ChannelUsage { amount_msat: 0, ..usage };
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 0);
+               let usage = ChannelUsage { amount_msat: 1_024, ..usage };
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 2_000);
        }
 
        #[test]
@@ -2142,18 +2292,23 @@ mod tests {
                let mut scorer = ProbabilisticScorer::new(params, &network_graph, &logger);
                let source = source_node_id();
                let target = target_node_id();
-               assert_eq!(scorer.channel_penalty_msat(42, 256, 1_024, &source, &target), 125);
+               let usage = ChannelUsage {
+                       amount_msat: 256,
+                       inflight_htlc_msat: 0,
+                       effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024 },
+               };
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 125);
 
                scorer.payment_path_failed(&payment_path_for_amount(512).iter().collect::<Vec<_>>(), 42);
-               assert_eq!(scorer.channel_penalty_msat(42, 256, 1_024, &source, &target), 281);
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 281);
 
                // An unchecked right shift 64 bits or more in DirectedChannelLiquidity::decayed_offset_msat
                // would cause an overflow.
                SinceEpoch::advance(Duration::from_secs(10 * 64));
-               assert_eq!(scorer.channel_penalty_msat(42, 256, 1_024, &source, &target), 125);
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 125);
 
                SinceEpoch::advance(Duration::from_secs(10));
-               assert_eq!(scorer.channel_penalty_msat(42, 256, 1_024, &source, &target), 125);
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 125);
        }
 
        #[test]
@@ -2168,31 +2323,36 @@ mod tests {
                let mut scorer = ProbabilisticScorer::new(params, &network_graph, &logger);
                let source = source_node_id();
                let target = target_node_id();
+               let usage = ChannelUsage {
+                       amount_msat: 512,
+                       inflight_htlc_msat: 0,
+                       effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024 },
+               };
 
-               assert_eq!(scorer.channel_penalty_msat(42, 512, 1_024, &source, &target), 300);
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 300);
 
                // More knowledge gives higher confidence (256, 768), meaning a lower penalty.
                scorer.payment_path_failed(&payment_path_for_amount(768).iter().collect::<Vec<_>>(), 42);
                scorer.payment_path_failed(&payment_path_for_amount(256).iter().collect::<Vec<_>>(), 43);
-               assert_eq!(scorer.channel_penalty_msat(42, 512, 1_024, &source, &target), 281);
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 281);
 
                // Decaying knowledge gives less confidence (128, 896), meaning a higher penalty.
                SinceEpoch::advance(Duration::from_secs(10));
-               assert_eq!(scorer.channel_penalty_msat(42, 512, 1_024, &source, &target), 291);
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 291);
 
                // Reducing the upper bound gives more confidence (128, 832) that the payment amount (512)
                // is closer to the upper bound, meaning a higher penalty.
                scorer.payment_path_successful(&payment_path_for_amount(64).iter().collect::<Vec<_>>());
-               assert_eq!(scorer.channel_penalty_msat(42, 512, 1_024, &source, &target), 331);
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 331);
 
                // Increasing the lower bound gives more confidence (256, 832) that the payment amount (512)
                // is closer to the lower bound, meaning a lower penalty.
                scorer.payment_path_failed(&payment_path_for_amount(256).iter().collect::<Vec<_>>(), 43);
-               assert_eq!(scorer.channel_penalty_msat(42, 512, 1_024, &source, &target), 245);
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 245);
 
                // Further decaying affects the lower bound more than the upper bound (128, 928).
                SinceEpoch::advance(Duration::from_secs(10));
-               assert_eq!(scorer.channel_penalty_msat(42, 512, 1_024, &source, &target), 280);
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 280);
        }
 
        #[test]
@@ -2207,15 +2367,20 @@ mod tests {
                let mut scorer = ProbabilisticScorer::new(params, &network_graph, &logger);
                let source = source_node_id();
                let target = target_node_id();
+               let usage = ChannelUsage {
+                       amount_msat: 500,
+                       inflight_htlc_msat: 0,
+                       effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_000 },
+               };
 
                scorer.payment_path_failed(&payment_path_for_amount(500).iter().collect::<Vec<_>>(), 42);
-               assert_eq!(scorer.channel_penalty_msat(42, 500, 1_000, &source, &target), u64::max_value());
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), u64::max_value());
 
                SinceEpoch::advance(Duration::from_secs(10));
-               assert_eq!(scorer.channel_penalty_msat(42, 500, 1_000, &source, &target), 473);
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 473);
 
                scorer.payment_path_failed(&payment_path_for_amount(250).iter().collect::<Vec<_>>(), 43);
-               assert_eq!(scorer.channel_penalty_msat(42, 500, 1_000, &source, &target), 300);
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 300);
 
                let mut serialized_scorer = Vec::new();
                scorer.write(&mut serialized_scorer).unwrap();
@@ -2223,7 +2388,7 @@ mod tests {
                let mut serialized_scorer = io::Cursor::new(&serialized_scorer);
                let deserialized_scorer =
                        <ProbabilisticScorer>::read(&mut serialized_scorer, (params, &network_graph, &logger)).unwrap();
-               assert_eq!(deserialized_scorer.channel_penalty_msat(42, 500, 1_000, &source, &target), 300);
+               assert_eq!(deserialized_scorer.channel_penalty_msat(42, &source, &target, usage), 300);
        }
 
        #[test]
@@ -2238,9 +2403,14 @@ mod tests {
                let mut scorer = ProbabilisticScorer::new(params, &network_graph, &logger);
                let source = source_node_id();
                let target = target_node_id();
+               let usage = ChannelUsage {
+                       amount_msat: 500,
+                       inflight_htlc_msat: 0,
+                       effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_000 },
+               };
 
                scorer.payment_path_failed(&payment_path_for_amount(500).iter().collect::<Vec<_>>(), 42);
-               assert_eq!(scorer.channel_penalty_msat(42, 500, 1_000, &source, &target), u64::max_value());
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), u64::max_value());
 
                let mut serialized_scorer = Vec::new();
                scorer.write(&mut serialized_scorer).unwrap();
@@ -2250,13 +2420,13 @@ mod tests {
                let mut serialized_scorer = io::Cursor::new(&serialized_scorer);
                let deserialized_scorer =
                        <ProbabilisticScorer>::read(&mut serialized_scorer, (params, &network_graph, &logger)).unwrap();
-               assert_eq!(deserialized_scorer.channel_penalty_msat(42, 500, 1_000, &source, &target), 473);
+               assert_eq!(deserialized_scorer.channel_penalty_msat(42, &source, &target, usage), 473);
 
                scorer.payment_path_failed(&payment_path_for_amount(250).iter().collect::<Vec<_>>(), 43);
-               assert_eq!(scorer.channel_penalty_msat(42, 500, 1_000, &source, &target), 300);
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 300);
 
                SinceEpoch::advance(Duration::from_secs(10));
-               assert_eq!(deserialized_scorer.channel_penalty_msat(42, 500, 1_000, &source, &target), 365);
+               assert_eq!(deserialized_scorer.channel_penalty_msat(42, &source, &target, usage), 365);
        }
 
        #[test]
@@ -2270,17 +2440,52 @@ mod tests {
                let source = source_node_id();
                let target = target_node_id();
 
-               assert_eq!(scorer.channel_penalty_msat(42, 100_000_000, 950_000_000, &source, &target), 3613);
-               assert_eq!(scorer.channel_penalty_msat(42, 100_000_000, 1_950_000_000, &source, &target), 1977);
-               assert_eq!(scorer.channel_penalty_msat(42, 100_000_000, 2_950_000_000, &source, &target), 1474);
-               assert_eq!(scorer.channel_penalty_msat(42, 100_000_000, 3_950_000_000, &source, &target), 1223);
-               assert_eq!(scorer.channel_penalty_msat(42, 100_000_000, 4_950_000_000, &source, &target), 877);
-               assert_eq!(scorer.channel_penalty_msat(42, 100_000_000, 5_950_000_000, &source, &target), 845);
-               assert_eq!(scorer.channel_penalty_msat(42, 100_000_000, 6_950_000_000, &source, &target), 500);
-               assert_eq!(scorer.channel_penalty_msat(42, 100_000_000, 7_450_000_000, &source, &target), 500);
-               assert_eq!(scorer.channel_penalty_msat(42, 100_000_000, 7_950_000_000, &source, &target), 500);
-               assert_eq!(scorer.channel_penalty_msat(42, 100_000_000, 8_950_000_000, &source, &target), 500);
-               assert_eq!(scorer.channel_penalty_msat(42, 100_000_000, 9_950_000_000, &source, &target), 500);
+               let usage = ChannelUsage {
+                       amount_msat: 100_000_000,
+                       inflight_htlc_msat: 0,
+                       effective_capacity: EffectiveCapacity::Total { capacity_msat: 950_000_000 },
+               };
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 3613);
+               let usage = ChannelUsage {
+                       effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_950_000_000 }, ..usage
+               };
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 1977);
+               let usage = ChannelUsage {
+                       effective_capacity: EffectiveCapacity::Total { capacity_msat: 2_950_000_000 }, ..usage
+               };
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 1474);
+               let usage = ChannelUsage {
+                       effective_capacity: EffectiveCapacity::Total { capacity_msat: 3_950_000_000 }, ..usage
+               };
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 1223);
+               let usage = ChannelUsage {
+                       effective_capacity: EffectiveCapacity::Total { capacity_msat: 4_950_000_000 }, ..usage
+               };
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 877);
+               let usage = ChannelUsage {
+                       effective_capacity: EffectiveCapacity::Total { capacity_msat: 5_950_000_000 }, ..usage
+               };
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 845);
+               let usage = ChannelUsage {
+                       effective_capacity: EffectiveCapacity::Total { capacity_msat: 6_950_000_000 }, ..usage
+               };
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 500);
+               let usage = ChannelUsage {
+                       effective_capacity: EffectiveCapacity::Total { capacity_msat: 7_450_000_000 }, ..usage
+               };
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 500);
+               let usage = ChannelUsage {
+                       effective_capacity: EffectiveCapacity::Total { capacity_msat: 7_950_000_000 }, ..usage
+               };
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 500);
+               let usage = ChannelUsage {
+                       effective_capacity: EffectiveCapacity::Total { capacity_msat: 8_950_000_000 }, ..usage
+               };
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 500);
+               let usage = ChannelUsage {
+                       effective_capacity: EffectiveCapacity::Total { capacity_msat: 9_950_000_000 }, ..usage
+               };
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 500);
        }
 
        #[test]
@@ -2289,19 +2494,24 @@ mod tests {
                let network_graph = network_graph();
                let source = source_node_id();
                let target = target_node_id();
+               let usage = ChannelUsage {
+                       amount_msat: 128,
+                       inflight_htlc_msat: 0,
+                       effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024 },
+               };
 
                let params = ProbabilisticScoringParameters {
                        liquidity_penalty_multiplier_msat: 1_000,
                        ..ProbabilisticScoringParameters::zero_penalty()
                };
                let scorer = ProbabilisticScorer::new(params, &network_graph, &logger);
-               assert_eq!(scorer.channel_penalty_msat(42, 128, 1_024, &source, &target), 58);
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 58);
 
                let params = ProbabilisticScoringParameters {
                        base_penalty_msat: 500, liquidity_penalty_multiplier_msat: 1_000, ..Default::default()
                };
                let scorer = ProbabilisticScorer::new(params, &network_graph, &logger);
-               assert_eq!(scorer.channel_penalty_msat(42, 128, 1_024, &source, &target), 558);
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 558);
        }
 
        #[test]
@@ -2310,6 +2520,11 @@ mod tests {
                let network_graph = network_graph();
                let source = source_node_id();
                let target = target_node_id();
+               let usage = ChannelUsage {
+                       amount_msat: 512_000,
+                       inflight_htlc_msat: 0,
+                       effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024_000 },
+               };
 
                let params = ProbabilisticScoringParameters {
                        liquidity_penalty_multiplier_msat: 1_000,
@@ -2317,7 +2532,7 @@ mod tests {
                        ..ProbabilisticScoringParameters::zero_penalty()
                };
                let scorer = ProbabilisticScorer::new(params, &network_graph, &logger);
-               assert_eq!(scorer.channel_penalty_msat(42, 512_000, 1_024_000, &source, &target), 300);
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 300);
 
                let params = ProbabilisticScoringParameters {
                        liquidity_penalty_multiplier_msat: 1_000,
@@ -2325,7 +2540,7 @@ mod tests {
                        ..ProbabilisticScoringParameters::zero_penalty()
                };
                let scorer = ProbabilisticScorer::new(params, &network_graph, &logger);
-               assert_eq!(scorer.channel_penalty_msat(42, 512_000, 1_024_000, &source, &target), 337);
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 337);
        }
 
        #[test]
@@ -2334,15 +2549,61 @@ mod tests {
                let network_graph = network_graph();
                let source = source_node_id();
                let target = target_node_id();
+               let usage = ChannelUsage {
+                       amount_msat: u64::max_value(),
+                       inflight_htlc_msat: 0,
+                       effective_capacity: EffectiveCapacity::Infinite,
+               };
 
                let params = ProbabilisticScoringParameters {
                        liquidity_penalty_multiplier_msat: 40_000,
                        ..ProbabilisticScoringParameters::zero_penalty()
                };
                let scorer = ProbabilisticScorer::new(params, &network_graph, &logger);
-               assert_eq!(
-                       scorer.channel_penalty_msat(42, u64::max_value(), u64::max_value(), &source, &target),
-                       80_000,
-               );
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 80_000);
+       }
+
+       #[test]
+       fn accounts_for_inflight_htlc_usage() {
+               let network_graph = network_graph();
+               let logger = TestLogger::new();
+               let params = ProbabilisticScoringParameters::default();
+               let scorer = ProbabilisticScorer::new(params, &network_graph, &logger);
+               let source = source_node_id();
+               let target = target_node_id();
+
+               let usage = ChannelUsage {
+                       amount_msat: 750,
+                       inflight_htlc_msat: 0,
+                       effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_000 },
+               };
+               assert_ne!(scorer.channel_penalty_msat(42, &source, &target, usage), u64::max_value());
+
+               let usage = ChannelUsage { inflight_htlc_msat: 251, ..usage };
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), u64::max_value());
+       }
+
+       #[test]
+       fn removes_uncertainity_when_exact_liquidity_known() {
+               let network_graph = network_graph();
+               let logger = TestLogger::new();
+               let params = ProbabilisticScoringParameters::default();
+               let scorer = ProbabilisticScorer::new(params, &network_graph, &logger);
+               let source = source_node_id();
+               let target = target_node_id();
+
+               let base_penalty_msat = params.base_penalty_msat;
+               let usage = ChannelUsage {
+                       amount_msat: 750,
+                       inflight_htlc_msat: 0,
+                       effective_capacity: EffectiveCapacity::ExactLiquidity { liquidity_msat: 1_000 },
+               };
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), base_penalty_msat);
+
+               let usage = ChannelUsage { amount_msat: 1_000, ..usage };
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), base_penalty_msat);
+
+               let usage = ChannelUsage { amount_msat: 1_001, ..usage };
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), u64::max_value());
        }
 }
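
The converted assertions above all follow one pattern: the old positional (amount_msat, capacity_msat) arguments to channel_penalty_msat are folded into a single ChannelUsage value carrying the payment amount, the in-flight HTLC total, and an EffectiveCapacity. A minimal sketch of that calling convention, written as an extra test in the same module; it reuses the module's own helpers (network_graph(), source_node_id(), target_node_id(), TestLogger), and its name and final assertion are illustrative only, not part of this commit:

    #[test]
    fn channel_usage_calling_convention_sketch() {
        // Hypothetical extra test, not part of the diff above; relies on the module's helpers.
        let logger = TestLogger::new();
        let network_graph = network_graph();
        let params = ProbabilisticScoringParameters::default();
        let scorer = ProbabilisticScorer::new(params, &network_graph, &logger);
        let source = source_node_id();
        let target = target_node_id();

        // One ChannelUsage value replaces the old positional amount/capacity arguments.
        let usage = ChannelUsage {
            amount_msat: 500,
            inflight_htlc_msat: 0,
            effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_000 },
        };
        let penalty_at_500 = scorer.channel_penalty_msat(42, &source, &target, usage);

        // Struct-update syntax tweaks one field while reusing the rest, the idiom the converted
        // tests above rely on (ChannelUsage is Copy, so `usage` stays usable after the call).
        let usage = ChannelUsage { amount_msat: 750, ..usage };
        let penalty_at_750 = scorer.channel_penalty_msat(42, &source, &target, usage);

        // Sending more over the same capacity can only look riskier, never cheaper.
        assert!(penalty_at_750 >= penalty_at_500);
    }
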
index 3868d29aab49f66789c49c970a662cba19000e2e..bdd222e31c58f0ab921ae2f3f057d8264b68b560 100644 (file)
@@ -22,7 +22,15 @@ pub struct ChannelHandshakeConfig {
        /// Applied only for inbound channels (see ChannelHandshakeLimits::max_minimum_depth for the
        /// equivalent limit applied to outbound channels).
        ///
+       /// A lower-bound of 1 is applied, requiring all channels to have a confirmed commitment
+       /// transaction before operation. If you wish to accept channels with zero confirmations, see
+       /// [`UserConfig::manually_accept_inbound_channels`] and
+       /// [`ChannelManager::accept_inbound_channel_from_trusted_peer_0conf`].
+       ///
        /// Default value: 6.
+       ///
+       /// [`ChannelManager::accept_inbound_channel`]: crate::ln::channelmanager::ChannelManager::accept_inbound_channel
+       /// [`ChannelManager::accept_inbound_channel_from_trusted_peer_0conf`]: crate::ln::channelmanager::ChannelManager::accept_inbound_channel_from_trusted_peer_0conf
        pub minimum_depth: u32,
        /// Set to the number of blocks we require our counterparty to wait to claim their money (ie
        /// the number of blocks we have to punish our counterparty if they broadcast a revoked
@@ -159,6 +167,24 @@ pub struct ChannelHandshakeLimits {
        ///
        /// Default value: 144, or roughly one day and only applies to outbound channels.
        pub max_minimum_depth: u32,
+       /// Whether we implicitly trust funding transactions generated by us for our own outbound
+       /// channels to not be double-spent.
+       ///
+       /// If this is set, we assume that our own funding transactions are *never* double-spent, and
+       /// thus we can trust them without any confirmations. This is generally a reasonable
+       /// assumption, given we're the only ones who could ever double-spend it (assuming we have sole
+       /// control of the signing keys).
+       ///
+       /// You may wish to un-set this if you allow the user to (or do in an automated fashion)
+       /// double-spend the funding transaction to RBF with an alternative channel open.
+       ///
+       /// This only applies if our counterparty set their confirmations-required value to 0, and we
+       /// always trust our own funding transaction at 1 confirmation irrespective of this value.
+       /// Thus, this effectively acts as a `min_minimum_depth`, with the only possible values being
+       /// `true` (0) and `false` (1).
+       ///
+       /// Default value: true
+       pub trust_own_funding_0conf: bool,
        /// Set to force an incoming channel to match our announced channel preference in
        /// [`ChannelConfig::announced_channel`].
        ///
@@ -187,6 +213,7 @@ impl Default for ChannelHandshakeLimits {
                        min_max_htlc_value_in_flight_msat: 0,
                        max_channel_reserve_satoshis: <u64>::max_value(),
                        min_max_accepted_htlcs: 0,
+                       trust_own_funding_0conf: true,
                        max_minimum_depth: 144,
                        force_announced_channel_preference: true,
                        their_to_self_delay: MAX_LOCAL_BREAKDOWN_TIMEOUT,
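
The new trust_own_funding_0conf limit and the expanded minimum_depth documentation cover two sides of 0-conf handling: whether we treat our own unconfirmed funding transactions as final on outbound channels, and the 1-confirmation floor (plus the manual-acceptance escape hatch) on inbound ones. A hedged sketch of setting the two structs shown in this diff; the helper function is illustrative, Default impls are assumed for both structs, and wiring the values into a UserConfig is omitted since those field names are not shown here:

    use lightning::util::config::{ChannelHandshakeConfig, ChannelHandshakeLimits};

    fn zero_conf_handshake_prefs() -> (ChannelHandshakeConfig, ChannelHandshakeLimits) {
        // Outbound: stop trusting our own unconfirmed funding transactions, e.g. because the
        // wallet may RBF the funding tx. Per the docs above this acts like min_minimum_depth = 1.
        let limits = ChannelHandshakeLimits {
            trust_own_funding_0conf: false,
            // Keep the existing cap on how many confirmations a counterparty may demand of us.
            max_minimum_depth: 144,
            ..Default::default()
        };

        // Inbound: minimum_depth is still floored at 1; genuinely 0-conf inbound channels go
        // through manually_accept_inbound_channels plus
        // accept_inbound_channel_from_trusted_peer_0conf, as the doc links above describe.
        let config = ChannelHandshakeConfig {
            minimum_depth: 6,
            ..Default::default()
        };

        (config, limits)
    }
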
index 9a1f6c5bc50d77cf34cc9259bf30487759181088..34a8a410b65d1ad5c603954ee1d2290f4ba4856f 100644 (file)
@@ -364,6 +364,10 @@ pub enum Event {
                path: Vec<RouteHop>,
                /// The channel responsible for the failed payment path.
                ///
+               /// Note that for route hints or for the first hop in a path this may be an SCID alias and
+               /// may not refer to a channel in the public network graph. These aliases may also collide
+               /// with channels in the public network graph.
+               ///
                /// If this is `Some`, then the corresponding channel should be avoided when the payment is
                /// retried. May be `None` for older [`Event`] serializations.
                short_channel_id: Option<u64>,
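
The added caveat matters to any consumer that uses PaymentPathFailed for retry logic: the reported short_channel_id may be an SCID alias for an unannounced first hop or a route hint, so it should be treated as an opaque value to route around rather than as a key into the public network graph. A hypothetical handler sketch (the function name and the HashSet bookkeeping are illustrative only):

    use std::collections::HashSet;
    use lightning::util::events::Event;

    // Remember channels, by SCID or SCID alias, that the next retry should avoid.
    fn note_failed_channel(event: &Event, channels_to_avoid: &mut HashSet<u64>) {
        if let Event::PaymentPathFailed { short_channel_id: Some(scid), .. } = event {
            // An alias may be missing from the public graph, or even collide with an unrelated
            // public channel, so only use it to filter candidate routes on retry.
            channels_to_avoid.insert(*scid);
        }
    }
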
index 8552358c35ae8d8fe8532f66cd9346409405c2c5..676c303bfa8dc1384b404cfe355aa77d99990242 100644 (file)
@@ -79,6 +79,10 @@ pub(crate) mod fake_scid {
        const MAX_NAMESPACES: u8 = 8; // We allocate 3 bits for the namespace identifier.
        const NAMESPACE_ID_BITMASK: u8 = 0b111;
 
+       const BLOCKS_PER_MONTH: u32 = 144 /* blocks per day */ * 30 /* days per month */;
+       pub(crate) const MAX_SCID_BLOCKS_FROM_NOW: u32 = BLOCKS_PER_MONTH;
+
+
        /// Fake scids are divided into namespaces, with each namespace having its own identifier between
        /// [0..7]. This allows us to identify what namespace a fake scid corresponds to upon HTLC
        /// receipt, and handle the HTLC accordingly. The namespace identifier is encrypted when encoded
@@ -100,7 +104,6 @@ pub(crate) mod fake_scid {
                        // Ensure we haven't created a namespace that doesn't fit into the 3 bits we've allocated for
                        // namespaces.
                        assert!((*self as u8) < MAX_NAMESPACES);
-                       const BLOCKS_PER_MONTH: u32 = 144 /* blocks per day */ * 30 /* days per month */;
                        let rand_bytes = keys_manager.get_secure_random_bytes();
 
                        let segwit_activation_height = segwit_activation_height(genesis_hash);
@@ -109,7 +112,7 @@ pub(crate) mod fake_scid {
                        // We want to ensure that this fake channel won't conflict with any transactions we haven't
                        // seen yet, in case `highest_seen_blockheight` is updated before we get full information
                        // about transactions confirmed in the given block.
-                       blocks_since_segwit_activation = blocks_since_segwit_activation.saturating_sub(BLOCKS_PER_MONTH);
+                       blocks_since_segwit_activation = blocks_since_segwit_activation.saturating_sub(MAX_SCID_BLOCKS_FROM_NOW);
 
                        let rand_for_height = u32::from_be_bytes(rand_bytes[..4].try_into().unwrap());
                        let fake_scid_height = segwit_activation_height + rand_for_height % (blocks_since_segwit_activation + 1);
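
Hoisting BLOCKS_PER_MONTH out of the function and exposing it as MAX_SCID_BLOCKS_FROM_NOW makes the safety margin reusable elsewhere in the crate: fake SCID block heights are drawn from a range that stops roughly one month short of the highest seen block, so a freshly issued fake SCID cannot land in a block whose transactions may not be fully processed yet. A small worked illustration with hypothetical heights (the real code draws the offset randomly via the keys manager; this only recomputes the upper bound):

    // 144 blocks/day * 30 days, matching the constants introduced above.
    const MAX_SCID_BLOCKS_FROM_NOW: u32 = 144 * 30; // = 4_320

    fn max_fake_scid_height(highest_seen_blockheight: u32, segwit_activation_height: u32) -> u32 {
        // Mirrors the saturating subtraction above: the random height offset is drawn from
        // [0, blocks_since_segwit_activation], pulled back by a month's worth of blocks.
        let blocks_since_segwit_activation = highest_seen_blockheight
            .saturating_sub(segwit_activation_height)
            .saturating_sub(MAX_SCID_BLOCKS_FROM_NOW);
        segwit_activation_height + blocks_since_segwit_activation
    }

    // With mainnet segwit activation at height 481_824 and a hypothetical tip of 740_000:
    // 740_000 - 481_824 = 258_176, minus 4_320 leaves 253_856, so the highest possible fake
    // SCID height is 481_824 + 253_856 = 735_680, about one month below the tip.
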
index 69fd14640ae605bc2d481f40760ee48d3a502eb6..428adbc5e6638b5a0489eeb855d80f7c42f5ce86 100644 (file)
@@ -301,7 +301,7 @@ impl Readable for U48 {
 /// encoded in several different ways, which we must check for at deserialization-time. Thus, if
 /// you're looking for an example of a variable-length integer to use for your own project, move
 /// along, this is a rather poor design.
-pub(crate) struct BigSize(pub u64);
+pub struct BigSize(pub u64);
 impl Writeable for BigSize {
        #[inline]
        fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
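
Making BigSize public lets downstream code read and write BOLT-style variable-length integers directly through the generic serialization entry points. A hedged round-trip sketch; it assumes a std build (where the crate's reader traits are satisfied by std::io types), that Writeable::encode is available as elsewhere in util::ser, and that BigSize also implements Readable:

    use lightning::util::ser::{BigSize, Readable, Writeable};
    use std::io::Cursor;

    fn bigsize_roundtrip_sketch() {
        // Values below 0xfd use the single-byte form; 0xfd..=0xffff use a 0xfd marker followed
        // by a big-endian u16, per the BigSize rules this type implements.
        assert_eq!(BigSize(252).encode(), vec![0xfc]);

        let encoded = BigSize(253).encode();
        assert_eq!(encoded, vec![0xfd, 0x00, 0xfd]);

        let decoded: BigSize = Readable::read(&mut Cursor::new(&encoded)).unwrap();
        assert_eq!(decoded.0, 253);
    }
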
index 3682a0e8f0af02d1ac654853bff16e3096f4025a..fe756b73ad67d28fdbe0ba3cdc9fd73865635100 100644 (file)
@@ -31,7 +31,7 @@ use bitcoin::blockdata::constants::genesis_block;
 use bitcoin::blockdata::transaction::{Transaction, TxOut};
 use bitcoin::blockdata::script::{Builder, Script};
 use bitcoin::blockdata::opcodes;
-use bitcoin::blockdata::block::BlockHeader;
+use bitcoin::blockdata::block::Block;
 use bitcoin::network::constants::Network;
 use bitcoin::hash_types::{BlockHash, Txid};
 
@@ -116,6 +116,11 @@ impl<'a> TestChainMonitor<'a> {
                        expect_channel_force_closed: Mutex::new(None),
                }
        }
+
+       pub fn complete_sole_pending_chan_update(&self, channel_id: &[u8; 32]) {
+               let (outpoint, _, latest_update) = self.latest_monitor_update_id.lock().unwrap().get(channel_id).unwrap().clone();
+               self.chain_monitor.channel_monitor_updated(outpoint, latest_update).unwrap();
+       }
 }
 impl<'a> chain::Watch<EnforcingSigner> for TestChainMonitor<'a> {
        fn watch_channel(&self, funding_txo: OutPoint, monitor: channelmonitor::ChannelMonitor<EnforcingSigner>) -> Result<(), chain::ChannelMonitorUpdateErr> {
@@ -224,11 +229,11 @@ impl<Signer: keysinterface::Sign> chainmonitor::Persist<Signer> for TestPersiste
 
 pub struct TestBroadcaster {
        pub txn_broadcasted: Mutex<Vec<Transaction>>,
-       pub blocks: Arc<Mutex<Vec<(BlockHeader, u32)>>>,
+       pub blocks: Arc<Mutex<Vec<(Block, u32)>>>,
 }
 
 impl TestBroadcaster {
-       pub fn new(blocks: Arc<Mutex<Vec<(BlockHeader, u32)>>>) -> TestBroadcaster {
+       pub fn new(blocks: Arc<Mutex<Vec<(Block, u32)>>>) -> TestBroadcaster {
                TestBroadcaster { txn_broadcasted: Mutex::new(Vec::new()), blocks }
        }
 }