Merge pull request #2688 from valentinewallace/2023-10-multihop-blinded-recv
author: valentinewallace <valentinewallace@users.noreply.github.com>
Wed, 13 Dec 2023 15:31:02 +0000 (10:31 -0500)
committer: GitHub <noreply@github.com>
Wed, 13 Dec 2023 15:31:02 +0000 (10:31 -0500)
Support receiving to multi-hop blinded paths

50 files changed:
.github/workflows/build.yml
CONTRIBUTING.md
Cargo.toml
bench/Cargo.toml
ci/check-cfg-flags.py [new file with mode: 0755]
ci/ci-tests.sh
fuzz/Cargo.toml
lightning-background-processor/Cargo.toml
lightning-background-processor/src/lib.rs
lightning-block-sync/Cargo.toml
lightning-block-sync/src/lib.rs
lightning-invoice/Cargo.toml
lightning-invoice/fuzz/Cargo.toml
lightning-invoice/fuzz/fuzz_targets/serde_data_part.rs
lightning-invoice/src/lib.rs
lightning-net-tokio/Cargo.toml
lightning-net-tokio/src/lib.rs
lightning-persister/Cargo.toml
lightning-persister/src/lib.rs
lightning-rapid-gossip-sync/Cargo.toml
lightning-rapid-gossip-sync/src/lib.rs
lightning-transaction-sync/Cargo.toml
lightning-transaction-sync/src/lib.rs
lightning/Cargo.toml
lightning/src/chain/chaininterface.rs
lightning/src/chain/chainmonitor.rs
lightning/src/chain/channelmonitor.rs
lightning/src/chain/mod.rs
lightning/src/chain/onchaintx.rs
lightning/src/events/bump_transaction.rs
lightning/src/lib.rs
lightning/src/ln/channel.rs
lightning/src/ln/channelmanager.rs
lightning/src/ln/functional_tests.rs
lightning/src/ln/monitor_tests.rs
lightning/src/ln/onion_payment.rs
lightning/src/ln/onion_utils.rs
lightning/src/ln/payment_tests.rs
lightning/src/ln/peer_handler.rs
lightning/src/ln/reload_tests.rs
lightning/src/ln/reorg_tests.rs
lightning/src/onion_message/messenger.rs
lightning/src/routing/gossip.rs
lightning/src/routing/router.rs
lightning/src/routing/scoring.rs
lightning/src/util/logger.rs
lightning/src/util/ser.rs
lightning/src/util/test_utils.rs
msrv-no-dev-deps-check/Cargo.toml
no-std-check/Cargo.toml

index 00ef76f787e188e0e23fc94b2244bb1cbca8673f..65be3faef8b5578e0e44b84a6e3651be360af01f 100644 (file)
@@ -18,19 +18,7 @@ jobs:
       fail-fast: false
       matrix:
         platform: [ ubuntu-latest, windows-latest, macos-latest ]
-        toolchain: [ stable, beta ]
-        include:
-          - toolchain: stable
-            platform: ubuntu-latest
-          # 1.48.0 is the MSRV for all crates except lightning-transaction-sync and Win/Mac
-          - toolchain: 1.48.0
-            platform: ubuntu-latest
-          # Windows requires 1.49.0 because that's the MSRV for supported Tokio
-          - toolchain: 1.49.0
-            platform: windows-latest
-          # MacOS-latest requires 1.54.0 because that's what's required for linking to work properly
-          - toolchain: 1.54.0
-            platform: macos-latest
+        toolchain: [ stable, beta, 1.63.0 ] # 1.63.0 is the MSRV for all crates.
     runs-on: ${{ matrix.platform }}
     steps:
       - name: Checkout source code
@@ -44,6 +32,8 @@ jobs:
         run: |
           rustup target add thumbv7m-none-eabi
           sudo apt-get -y install gcc-arm-none-eabi
+      - name: Check for unknown cfg tags
+        run: ci/check-cfg-flags.py
       - name: shellcheck the CI script
         if: "matrix.platform == 'ubuntu-latest'"
         run: |
@@ -168,13 +158,13 @@ jobs:
         run: |
           cargo check --release
           cargo check --no-default-features --features=no-std --release
-          cargo check --no-default-features --features=futures --release
+          cargo check --no-default-features --features=futures,std --release
           cargo doc --release
       - name: Run cargo check for Taproot build.
         run: |
           cargo check --release
           cargo check --no-default-features --features=no-std --release
-          cargo check --no-default-features --features=futures --release
+          cargo check --no-default-features --features=futures,std --release
           cargo doc --release
         env:
           RUSTFLAGS: '--cfg=taproot'
index 350415af24cc0ad86d200a0199797c53a0518572..78c515007d03a75ba2b0261d25ebc74f46531244 100644 (file)
@@ -88,7 +88,7 @@ be covered by functional tests.
 When refactoring, structure your PR to make it easy to review and don't
 hesitate to split it into multiple small, focused PRs.
 
-The Minimum Supported Rust Version (MSRV) currently is 1.48.0 (enforced by
+The Minimum Supported Rust Version (MSRV) currently is 1.63.0 (enforced by
 our GitHub Actions). We support reading serialized LDK objects written by any
 version of LDK 0.0.99 and above. We support LDK versions 0.0.113 and above
 reading serialized LDK objects written by modern LDK. Any expected issues with
index 8614cb48c1f15207023d9fd00b952600804d4e1b..a12f6ff9ff3de2ded9fbc368029aba53f945f066 100644 (file)
@@ -1,4 +1,5 @@
 [workspace]
+resolver = "2"
 
 members = [
     "lightning",
@@ -7,11 +8,11 @@ members = [
     "lightning-net-tokio",
     "lightning-persister",
     "lightning-background-processor",
-    "lightning-rapid-gossip-sync"
+    "lightning-rapid-gossip-sync",
+    "lightning-custom-message",
 ]
 
 exclude = [
-    "lightning-custom-message",
     "lightning-transaction-sync",
     "no-std-check",
     "msrv-no-dev-deps-check",
index e582d29da8108981ac4b42cf712e5d04624ed854..05354890c2a8fb4f5f0e9233fa273d444aac7cd9 100644 (file)
@@ -2,7 +2,7 @@
 name = "lightning-bench"
 version = "0.0.1"
 authors = ["Matt Corallo"]
-edition = "2018"
+edition = "2021"
 
 [[bench]]
 name = "bench"
diff --git a/ci/check-cfg-flags.py b/ci/check-cfg-flags.py
new file mode 100755 (executable)
index 0000000..85cbde8
--- /dev/null
@@ -0,0 +1,152 @@
+#!/usr/bin/env python3
+# Rust is fairly relaxed in checking the validity of arguments passed to #[cfg].
+# While it should probably be more strict when checking features, it cannot be
+# strict when checking loose cfg tags, because those can be anything and are
+# simply passed to rustc via unconstrained arguments.
+#
+# Thus, we do it for rustc manually, but scanning all our source and checking
+# that all our cfg tags match a known cfg tag.
+import sys, glob, re
+
+def check_feature(feature):
+    if feature == "std":
+        pass
+    elif feature == "no-std":
+        pass
+    elif feature == "hashbrown":
+        pass
+    elif feature == "backtrace":
+        pass
+    elif feature == "grind_signatures":
+        pass
+    elif feature == "unsafe_revoked_tx_signing":
+        pass
+    elif feature == "futures":
+        pass
+    elif feature == "tokio":
+        pass
+    elif feature == "rest-client":
+        pass
+    elif feature == "rpc-client":
+        pass
+    elif feature == "serde":
+        pass
+    elif feature == "esplora-blocking":
+        pass
+    elif feature == "esplora-async":
+        pass
+    elif feature == "async-interface":
+        pass
+    elif feature == "electrum":
+        pass
+    elif feature == "_test_utils":
+        pass
+    elif feature == "_test_vectors":
+        pass
+    elif feature == "afl":
+        pass
+    elif feature == "honggfuzz":
+        pass
+    elif feature == "libfuzzer_fuzz":
+        pass
+    elif feature == "stdin_fuzz":
+        pass
+    elif feature == "max_level_off":
+        pass
+    elif feature == "max_level_error":
+        pass
+    elif feature == "max_level_warn":
+        pass
+    elif feature == "max_level_info":
+        pass
+    elif feature == "max_level_debug":
+        pass
+    elif feature == "max_level_trace":
+        pass
+    else:
+        print("Bad feature: " + feature)
+        assert False
+
+def check_target_os(os):
+    if os == "windows":
+        pass
+    else:
+        assert False
+
+def check_cfg_tag(cfg):
+    if cfg == "fuzzing":
+        pass
+    elif cfg == "test":
+        pass
+    elif cfg == "debug_assertions":
+        pass
+    elif cfg == "c_bindings":
+        pass
+    elif cfg == "ldk_bench":
+        pass
+    elif cfg == "taproot":
+        pass
+    elif cfg == "require_route_graph_test":
+        pass
+    else:
+        print("Bad cfg tag: " + cfg)
+        assert False
+
+def check_cfg_args(cfg):
+    if cfg.startswith("all(") or cfg.startswith("any(") or cfg.startswith("not("):
+        brackets = 1
+        pos = 4
+        while pos < len(cfg):
+            if cfg[pos] == "(":
+                brackets += 1
+            elif cfg[pos] == ")":
+                brackets -= 1
+                if brackets == 0:
+                    check_cfg_args(cfg[4:pos])
+                    if pos + 1 != len(cfg):
+                        assert cfg[pos + 1] == ","
+                        check_cfg_args(cfg[pos + 2:].strip())
+                    return
+            pos += 1
+        assert False
+        assert(cfg.endswith(")"))
+        check_cfg_args(cfg[4:len(cfg)-1])
+    else:
+        parts = [part.strip() for part in cfg.split(",", 1)]
+        if len(parts) > 1:
+            for part in parts:
+                check_cfg_args(part)
+        elif cfg.startswith("feature") or cfg.startswith("target_os") or cfg.startswith("target_pointer_width"):
+            arg = cfg
+            if cfg.startswith("feature"):
+                arg = arg[7:].strip()
+            elif cfg.startswith("target_os"):
+                arg = arg[9:].strip()
+            else:
+                arg = arg[20:].strip()
+            assert arg.startswith("=")
+            arg = arg[1:].strip()
+            assert arg.startswith("\"")
+            assert arg.endswith("\"")
+            arg = arg[1:len(arg)-1]
+            assert not "\"" in arg
+            if cfg.startswith("feature"):
+                check_feature(arg)
+            elif cfg.startswith("target_os"):
+                check_target_os(arg)
+            else:
+                assert arg == "32" or arg == "64"
+        else:
+            check_cfg_tag(cfg.strip())
+
+cfg_regex = re.compile("#\[cfg\((.*)\)\]")
+for path in glob.glob(sys.path[0] + "/../**/*.rs", recursive = True):
+    with open(path, "r") as file:
+        while True:
+            line = file.readline()
+            if not line:
+                break
+            if "#[cfg(" in line:
+                if not line.strip().startswith("//"):
+                    cfg_part = cfg_regex.match(line.strip()).group(1)
+                    check_cfg_args(cfg_part)
index 2db32a1081c2c07d5a4f7e30c03e40ce79fbe9ff..11934a8307a6f0723aeeb7a5094f3853a0c0a981 100755 (executable)
@@ -8,38 +8,60 @@ HOST_PLATFORM="$(rustc --version --verbose | grep "host:" | awk '{ print $2 }')"
 # which we do here.
 # Further crates which appear only as dev-dependencies are pinned further down.
 function PIN_RELEASE_DEPS {
-       # Tokio MSRV on versions 1.17 through 1.26 is rustc 1.49. Above 1.26 MSRV is 1.56.
-       [ "$RUSTC_MINOR_VERSION" -lt 49 ] && cargo update -p tokio --precise "1.14.1" --verbose
-       [[ "$RUSTC_MINOR_VERSION" -gt 48  &&  "$RUSTC_MINOR_VERSION" -lt 56 ]] && cargo update -p tokio --precise "1.25.1" --verbose
-
-       # Sadly the log crate is always a dependency of tokio until 1.20, and has no reasonable MSRV guarantees
-       [ "$RUSTC_MINOR_VERSION" -lt 49 ] && cargo update -p log --precise "0.4.18" --verbose
-
-       # The serde_json crate switched to Rust edition 2021 starting with v1.0.101, i.e., has MSRV of 1.56
-       [ "$RUSTC_MINOR_VERSION" -lt 56 ] && cargo update -p serde_json --precise "1.0.100" --verbose
-
        return 0 # Don't fail the script if our rustc is higher than the last check
 }
 
-PIN_RELEASE_DEPS # pin the release dependencies in our main workspace
-
-# The addr2line v0.20 crate (a dependency of `backtrace` starting with 0.3.68) relies on 1.55+
-[ "$RUSTC_MINOR_VERSION" -lt 55 ] && cargo update -p backtrace --precise "0.3.67" --verbose
-
-# The quote crate switched to Rust edition 2021 starting with v1.0.31, i.e., has MSRV of 1.56
-[ "$RUSTC_MINOR_VERSION" -lt 56 ] && cargo update -p quote --precise "1.0.30" --verbose
+# The tests of `lightning-transaction-sync` require `electrs` and `bitcoind`
+# binaries. Here, we download the binaries, validate them, and export their
+# location via `ELECTRS_EXE`/`BITCOIND_EXE` which will be used by the
+# `electrsd`/`bitcoind` crates in our tests.
+function DOWNLOAD_ELECTRS_AND_BITCOIND {
+       ELECTRS_DL_ENDPOINT="https://github.com/RCasatta/electrsd/releases/download/electrs_releases"
+       ELECTRS_VERSION="esplora_a33e97e1a1fc63fa9c20a116bb92579bbf43b254"
+       BITCOIND_DL_ENDPOINT="https://bitcoincore.org/bin/"
+       BITCOIND_VERSION="25.1"
+       if [[ "$HOST_PLATFORM" == *linux* ]]; then
+               ELECTRS_DL_FILE_NAME=electrs_linux_"$ELECTRS_VERSION".zip
+               ELECTRS_DL_HASH="865e26a96e8df77df01d96f2f569dcf9622fc87a8d99a9b8fe30861a4db9ddf1"
+               BITCOIND_DL_FILE_NAME=bitcoin-"$BITCOIND_VERSION"-x86_64-linux-gnu.tar.gz
+               BITCOIND_DL_HASH="a978c407b497a727f0444156e397b50491ce862d1f906fef9b521415b3611c8b"
+       elif [[ "$HOST_PLATFORM" == *darwin* ]]; then
+               ELECTRS_DL_FILE_NAME=electrs_macos_"$ELECTRS_VERSION".zip
+               ELECTRS_DL_HASH="2d5ff149e8a2482d3658e9b386830dfc40c8fbd7c175ca7cbac58240a9505bcd"
+               BITCOIND_DL_FILE_NAME=bitcoin-"$BITCOIND_VERSION"-x86_64-apple-darwin.tar.gz
+               BITCOIND_DL_HASH="1acfde0ec3128381b83e3e5f54d1c7907871d324549129592144dd12a821eff1"
+       else
+               echo -e "\n\nUnsupported platform. Exiting.."
+               exit 1
+       fi
+
+       DL_TMP_DIR=$(mktemp -d)
+       trap 'rm -rf -- "$DL_TMP_DIR"' EXIT
+
+       pushd "$DL_TMP_DIR"
+       ELECTRS_DL_URL="$ELECTRS_DL_ENDPOINT"/"$ELECTRS_DL_FILE_NAME"
+       curl -L -o "$ELECTRS_DL_FILE_NAME" "$ELECTRS_DL_URL"
+       echo "$ELECTRS_DL_HASH  $ELECTRS_DL_FILE_NAME"|shasum -a 256 -c
+       unzip "$ELECTRS_DL_FILE_NAME"
+       export ELECTRS_EXE="$DL_TMP_DIR"/electrs
+       chmod +x "$ELECTRS_EXE"
+
+       BITCOIND_DL_URL="$BITCOIND_DL_ENDPOINT"/bitcoin-core-"$BITCOIND_VERSION"/"$BITCOIND_DL_FILE_NAME"
+       curl -L -o "$BITCOIND_DL_FILE_NAME" "$BITCOIND_DL_URL"
+       echo "$BITCOIND_DL_HASH  $BITCOIND_DL_FILE_NAME"|shasum -a 256 -c
+       tar xzf "$BITCOIND_DL_FILE_NAME"
+       export BITCOIND_EXE="$DL_TMP_DIR"/bitcoin-"$BITCOIND_VERSION"/bin/bitcoind
+       chmod +x "$BITCOIND_EXE"
+       popd
+}
 
-# The syn crate depends on too-new proc-macro2 starting with v2.0.33, i.e., has MSRV of 1.56
-if [ "$RUSTC_MINOR_VERSION" -lt 56 ]; then
-       SYN_2_DEP=$(grep -o '"syn 2.*' Cargo.lock | tr -d '",' | tr ' ' ':')
-       cargo update -p "$SYN_2_DEP" --precise "2.0.32" --verbose
-fi
+PIN_RELEASE_DEPS # pin the release dependencies in our main workspace
 
-# The proc-macro2 crate switched to Rust edition 2021 starting with v1.0.66, i.e., has MSRV of 1.56
-[ "$RUSTC_MINOR_VERSION" -lt 56 ] && cargo update -p proc-macro2 --precise "1.0.65" --verbose
+# Starting with version 1.10.0, the `regex` crate has an MSRV of rustc 1.65.0.
+[ "$RUSTC_MINOR_VERSION" -lt 65 ] && cargo update -p regex --precise "1.9.6" --verbose
 
-# The memchr crate switched to an MSRV of 1.60 starting with v2.6.0
-[ "$RUSTC_MINOR_VERSION" -lt 60 ] && cargo update -p memchr --precise "2.5.0" --verbose
+# The addr2line v0.21 crate (a dependency of `backtrace` starting with 0.3.69) relies on rustc 1.65
+[ "$RUSTC_MINOR_VERSION" -lt 65 ] && cargo update -p backtrace --precise "0.3.68" --verbose
 
 export RUST_BACKTRACE=1
 
@@ -59,17 +81,26 @@ cargo test --verbose --color always --features rpc-client,rest-client,tokio
 cargo check --verbose --color always --features rpc-client,rest-client,tokio
 popd
 
-if [[ $RUSTC_MINOR_VERSION -gt 67 && "$HOST_PLATFORM" != *windows* ]]; then
+if [[ "$HOST_PLATFORM" != *windows* ]]; then
        echo -e "\n\nBuilding and testing Transaction Sync Clients with features"
        pushd lightning-transaction-sync
-       cargo test --verbose --color always --features esplora-blocking
-       cargo check --verbose --color always --features esplora-blocking
-       cargo test --verbose --color always --features esplora-async
-       cargo check --verbose --color always --features esplora-async
-       cargo test --verbose --color always --features esplora-async-https
-       cargo check --verbose --color always --features esplora-async-https
-       cargo test --verbose --color always --features electrum
-       cargo check --verbose --color always --features electrum
+
+       # reqwest 0.11.21 had a regression that broke its 1.63.0 MSRV
+       [ "$RUSTC_MINOR_VERSION" -lt 65 ] && cargo update -p reqwest --precise "0.11.20" --verbose
+       # Starting with version 1.10.0, the `regex` crate has an MSRV of rustc 1.65.0.
+       [ "$RUSTC_MINOR_VERSION" -lt 65 ] && cargo update -p regex --precise "1.9.6" --verbose
+
+       DOWNLOAD_ELECTRS_AND_BITCOIND
+
+       RUSTFLAGS="--cfg no_download" cargo test --verbose --color always --features esplora-blocking
+       RUSTFLAGS="--cfg no_download" cargo check --verbose --color always --features esplora-blocking
+       RUSTFLAGS="--cfg no_download" cargo test --verbose --color always --features esplora-async
+       RUSTFLAGS="--cfg no_download" cargo check --verbose --color always --features esplora-async
+       RUSTFLAGS="--cfg no_download" cargo test --verbose --color always --features esplora-async-https
+       RUSTFLAGS="--cfg no_download" cargo check --verbose --color always --features esplora-async-https
+       RUSTFLAGS="--cfg no_download" cargo test --verbose --color always --features electrum
+       RUSTFLAGS="--cfg no_download" cargo check --verbose --color always --features electrum
+
        popd
 fi
 
@@ -78,20 +109,16 @@ pushd lightning-background-processor
 cargo test --verbose --color always --features futures
 popd
 
-if [ "$RUSTC_MINOR_VERSION" -gt 55 ]; then
-       echo -e "\n\nTest Custom Message Macros"
-       pushd lightning-custom-message
-       cargo test --verbose --color always
-       [ "$CI_MINIMIZE_DISK_USAGE" != "" ] && cargo clean
-       popd
-fi
+echo -e "\n\nTest Custom Message Macros"
+pushd lightning-custom-message
+cargo test --verbose --color always
+[ "$CI_MINIMIZE_DISK_USAGE" != "" ] && cargo clean
+popd
 
-if [ "$RUSTC_MINOR_VERSION" -gt 51 ]; then # Current `object` MSRV, subject to change
-       echo -e "\n\nTest backtrace-debug builds"
-       pushd lightning
-       cargo test --verbose --color always --features backtrace
-       popd
-fi
+echo -e "\n\nTest backtrace-debug builds"
+pushd lightning
+cargo test --verbose --color always --features backtrace
+popd
 
 echo -e "\n\nBuilding with all Log-Limiting features"
 pushd lightning
@@ -102,13 +129,14 @@ popd
 
 echo -e "\n\nTesting no-std flags in various combinations"
 for DIR in lightning lightning-invoice lightning-rapid-gossip-sync; do
-       [ "$RUSTC_MINOR_VERSION" -gt 50 ] && cargo test -p $DIR --verbose --color always --no-default-features --features no-std
+       cargo test -p $DIR --verbose --color always --no-default-features --features no-std
        # check if there is a conflict between no-std and the default std feature
-       [ "$RUSTC_MINOR_VERSION" -gt 50 ] && cargo test -p $DIR --verbose --color always --features no-std
+       cargo test -p $DIR --verbose --color always --features no-std
 done
+
 for DIR in lightning lightning-invoice lightning-rapid-gossip-sync; do
        # check if there is a conflict between no-std and the c_bindings cfg
-       [ "$RUSTC_MINOR_VERSION" -gt 50 ] && RUSTFLAGS="--cfg=c_bindings" cargo test -p $DIR --verbose --color always --no-default-features --features=no-std
+       RUSTFLAGS="--cfg=c_bindings" cargo test -p $DIR --verbose --color always --no-default-features --features=no-std
 done
 RUSTFLAGS="--cfg=c_bindings" cargo test --verbose --color always
 
@@ -125,16 +153,7 @@ popd
 echo -e "\n\nTesting no-std build on a downstream no-std crate"
 # check no-std compatibility across dependencies
 pushd no-std-check
-if [[ $RUSTC_MINOR_VERSION -gt 67 ]]; then
-       # lightning-transaction-sync's MSRV is 1.67
-       cargo check --verbose --color always --features lightning-transaction-sync
-else
-       # The memchr crate switched to an MSRV of 1.60 starting with v2.6.0
-       # This is currently only a release dependency via core2, which we intend to work with
-       # rust-bitcoin to remove soon.
-       [ "$RUSTC_MINOR_VERSION" -lt 60 ] && cargo update -p memchr --precise "2.5.0" --verbose
-       cargo check --verbose --color always
-fi
+cargo check --verbose --color always --features lightning-transaction-sync
 [ "$CI_MINIMIZE_DISK_USAGE" != "" ] && cargo clean
 popd
 
index 573096efdfcfb3983fa94448c57453e9b21cdde4..0c279a015c7398fe2ba0d8ba5e5b7ec49f2a154a 100644 (file)
@@ -3,7 +3,7 @@ name = "lightning-fuzz"
 version = "0.0.1"
 authors = ["Automatically generated"]
 publish = false
-edition = "2018"
+edition = "2021"
 # Because the function is unused it gets dropped before we link lightning, so
 # we have to duplicate build.rs here. Note that this is only required for
 # fuzzing mode.
index 933cbc466a677a8cd1f93e9d5e63219682477870..53358d43c9e8d8335cd2e6ecd1b6b55c83b74557 100644 (file)
@@ -7,7 +7,7 @@ repository = "https://github.com/lightningdevkit/rust-lightning"
 description = """
 Utilities to perform required background tasks for Rust Lightning.
 """
-edition = "2018"
+edition = "2021"
 
 [package.metadata.docs.rs]
 all-features = true
index 449891ba07f2d09e1dd4fa155f8322591e15b3dd..62c03b81dfe89288e726ae1ee89f383322ce68ae 100644 (file)
@@ -2,9 +2,8 @@
 //! running properly, and (2) either can or should be run in the background. See docs for
 //! [`BackgroundProcessor`] for more details on the nitty-gritty.
 
-// Prefix these with `rustdoc::` when we update our MSRV to be >= 1.52 to remove warnings.
-#![deny(broken_intra_doc_links)]
-#![deny(private_intra_doc_links)]
+#![deny(rustdoc::broken_intra_doc_links)]
+#![deny(rustdoc::private_intra_doc_links)]
 
 #![deny(missing_docs)]
 #![cfg_attr(not(feature = "futures"), deny(unsafe_code))]
@@ -904,7 +903,7 @@ mod tests {
        use lightning::ln::functional_test_utils::*;
        use lightning::ln::msgs::{ChannelMessageHandler, Init};
        use lightning::ln::peer_handler::{PeerManager, MessageHandler, SocketDescriptor, IgnoringMessageHandler};
-       use lightning::routing::gossip::{NetworkGraph, NodeId, P2PGossipSync};
+       use lightning::routing::gossip::{NetworkGraph, P2PGossipSync};
        use lightning::routing::scoring::{ChannelUsage, ScoreUpdate, ScoreLookUp, LockableScore};
        use lightning::routing::router::{DefaultRouter, Path, RouteHop, CandidateRouteHop};
        use lightning::util::config::UserConfig;
index 5a8f887c1d82b2b9acb9739bd9055b96653340f5..c7f7191ed55e2704b11e2be5e9e740668c42cc62 100644 (file)
@@ -7,7 +7,7 @@ repository = "https://github.com/lightningdevkit/rust-lightning"
 description = """
 Utilities to fetch the chain data from a block source and feed them into Rust Lightning.
 """
-edition = "2018"
+edition = "2021"
 
 [package.metadata.docs.rs]
 all-features = true
@@ -21,7 +21,7 @@ rpc-client = [ "serde_json", "chunked_transfer" ]
 bitcoin = "0.30.2"
 hex = { package = "hex-conservative", version = "0.1.1", default-features = false }
 lightning = { version = "0.0.118", path = "../lightning" }
-tokio = { version = "1.0", features = [ "io-util", "net", "time" ], optional = true }
+tokio = { version = "1.0", features = [ "io-util", "net", "time", "rt" ], optional = true }
 serde_json = { version = "1.0", optional = true }
 chunked_transfer = { version = "1.4", optional = true }
 
index 77ff3f0810b6f4a3bd6addd98a11faa383660b9d..4a01d4673b31e91d56c3cb350d995c1b7a3d7403 100644 (file)
@@ -13,9 +13,8 @@
 //! Both features support either blocking I/O using `std::net::TcpStream` or, with feature `tokio`,
 //! non-blocking I/O using `tokio::net::TcpStream` from inside a Tokio runtime.
 
-// Prefix these with `rustdoc::` when we update our MSRV to be >= 1.52 to remove warnings.
-#![deny(broken_intra_doc_links)]
-#![deny(private_intra_doc_links)]
+#![deny(rustdoc::broken_intra_doc_links)]
+#![deny(rustdoc::private_intra_doc_links)]
 
 #![deny(missing_docs)]
 #![deny(unsafe_code)]
index 0b45fa41e6cbcdede6a5bea5c71446a5f0d0f239..5377b7ef3356313d46aec61bf40275a0d4e04a4a 100644 (file)
@@ -8,7 +8,7 @@ license = "MIT OR Apache-2.0"
 keywords = [ "lightning", "bitcoin", "invoice", "BOLT11" ]
 readme = "README.md"
 repository = "https://github.com/lightningdevkit/rust-lightning/"
-edition = "2018"
+edition = "2021"
 
 [package.metadata.docs.rs]
 all-features = true
index 0d5fbb365ba3063fefa0b189bb858e7b7c843091..746fe63ba03d0d38e983212fa01f37a4a6d07551 100644 (file)
@@ -3,7 +3,7 @@ name = "lightning-invoice-fuzz"
 version = "0.0.1"
 authors = ["Automatically generated"]
 publish = false
-edition = "2018"
+edition = "2021"
 
 [package.metadata]
 cargo-fuzz = true
index 406f96764e9971ad97abc4f7dbdfb352e94cad35..871c6c7d755ee4dad7656f7649a0681ada25f3c5 100644 (file)
@@ -48,9 +48,9 @@ mod tests {
         for (idx, c) in hex.as_bytes().iter().filter(|&&c| c != b'\n').enumerate() {
             b <<= 4;
             match *c {
-                b'A'...b'F' => b |= c - b'A' + 10,
-                b'a'...b'f' => b |= c - b'a' + 10,
-                b'0'...b'9' => b |= c - b'0',
+                b'A'..=b'F' => b |= c - b'A' + 10,
+                b'a'..=b'f' => b |= c - b'a' + 10,
+                b'0'..=b'9' => b |= c - b'0',
                 _ => panic!("Bad hex"),
             }
             if (idx & 1) == 1 {
index d87c6c89372080b5696fd754f35fb8eeddf1c75d..89763aa758915dff924fd6155604f5a27f308bcf 100644 (file)
@@ -1,6 +1,5 @@
-// Prefix these with `rustdoc::` when we update our MSRV to be >= 1.52 to remove warnings.
-#![deny(broken_intra_doc_links)]
-#![deny(private_intra_doc_links)]
+#![deny(rustdoc::broken_intra_doc_links)]
+#![deny(rustdoc::private_intra_doc_links)]
 
 #![deny(missing_docs)]
 #![deny(non_upper_case_globals)]
index 247481fcfc853e5da507cd91b2cb96e0c016fd5d..e491dec5d458f413038592ff7db26e5849eb28f6 100644 (file)
@@ -8,7 +8,7 @@ description = """
 Implementation of the rust-lightning network stack using Tokio.
 For Rust-Lightning clients which wish to make direct connections to Lightning P2P nodes, this is a simple alternative to implementing the required network stack, especially for those already using Tokio.
 """
-edition = "2018"
+edition = "2021"
 
 [package.metadata.docs.rs]
 all-features = true
index d02f23fdd2cfd8649e9238cdda5a8fba470ace5c..1aa2cc25a13cc0772ee2e958da66aca602ce0eb4 100644 (file)
@@ -22,9 +22,8 @@
 //!
 //! [`PeerManager`]: lightning::ln::peer_handler::PeerManager
 
-// Prefix these with `rustdoc::` when we update our MSRV to be >= 1.52 to remove warnings.
-#![deny(broken_intra_doc_links)]
-#![deny(private_intra_doc_links)]
+#![deny(rustdoc::broken_intra_doc_links)]
+#![deny(rustdoc::private_intra_doc_links)]
 
 #![deny(missing_docs)]
 #![cfg_attr(docsrs, feature(doc_auto_cfg))]
index 387366bff0e9f4826af513633f126d42b1e9253c..4206b5f2226401430e5d9c28a474af4d656077f0 100644 (file)
@@ -7,7 +7,7 @@ repository = "https://github.com/lightningdevkit/rust-lightning"
 description = """
 Utilities for LDK data persistence and retrieval.
 """
-edition = "2018"
+edition = "2021"
 
 [package.metadata.docs.rs]
 all-features = true
index ae258e137d742f32ce1067d6508ad133e02bd67a..8e7d9055a6aae014b80daf6d9280717fda1a713f 100644 (file)
@@ -1,8 +1,7 @@
 //! Provides utilities for LDK data persistence and retrieval.
-//
-// TODO: Prefix these with `rustdoc::` when we update our MSRV to be >= 1.52 to remove warnings.
-#![deny(broken_intra_doc_links)]
-#![deny(private_intra_doc_links)]
+
+#![deny(rustdoc::broken_intra_doc_links)]
+#![deny(rustdoc::private_intra_doc_links)]
 
 #![deny(missing_docs)]
 
index 2018e3b24836094684c9b5f4848b6f28c808abee..006725b196bb111a7dc9cfbaf22b8805b5745296 100644 (file)
@@ -4,7 +4,7 @@ version = "0.0.118"
 authors = ["Arik Sosman <git@arik.io>"]
 license = "MIT OR Apache-2.0"
 repository = "https://github.com/lightningdevkit/rust-lightning"
-edition = "2018"
+edition = "2021"
 description = """
 Utility to process gossip routing data from Rapid Gossip Sync Server.
 """
index c15eedabbe19b3a8f70d6a15d64c1d2ca705ae6c..0561975f82151e0deae7782c4893b1056da4b0d5 100644 (file)
@@ -1,6 +1,5 @@
-// Prefix these with `rustdoc::` when we update our MSRV to be >= 1.52 to remove warnings.
-#![deny(broken_intra_doc_links)]
-#![deny(private_intra_doc_links)]
+#![deny(rustdoc::broken_intra_doc_links)]
+#![deny(rustdoc::private_intra_doc_links)]
 
 #![deny(missing_docs)]
 #![deny(unsafe_code)]
index 20e03ce6c2767721295e139bef579ff5e1fa63be..d712229b0a820b841579f883594dcfed7a5f44ac 100644 (file)
@@ -7,7 +7,7 @@ repository = "https://github.com/lightningdevkit/rust-lightning"
 description = """
 Utilities for syncing LDK via the transaction-based `Confirm` interface.
 """
-edition = "2018"
+edition = "2021"
 
 [package.metadata.docs.rs]
 all-features = true
@@ -22,7 +22,7 @@ electrum = ["electrum-client"]
 async-interface = []
 
 [dependencies]
-lightning = { version = "0.0.118", path = "../lightning", default-features = false }
+lightning = { version = "0.0.118", path = "../lightning", default-features = false, features = ["std"] }
 bitcoin = { version = "0.30.2", default-features = false }
 bdk-macros = "0.6"
 futures = { version = "0.3", optional = true }
@@ -30,6 +30,11 @@ esplora-client = { version = "0.6", default-features = false, optional = true }
 electrum-client = { version = "0.18.0", optional = true }
 
 [dev-dependencies]
-lightning = { version = "0.0.118", path = "../lightning", features = ["std", "_test_utils"] }
-electrsd = { version = "0.26.0", features = ["legacy", "esplora_a33e97e1", "bitcoind_25_0"] }
+lightning = { version = "0.0.118", path = "../lightning", default-features = false, features = ["std", "_test_utils"] }
 tokio = { version = "1.14.0", features = ["full"] }
+
+[target.'cfg(not(no_download))'.dev-dependencies]
+electrsd = { version = "0.26.0", default-features = false, features = ["legacy", "esplora_a33e97e1", "bitcoind_25_0"] }
+
+[target.'cfg(no_download)'.dev-dependencies]
+electrsd = { version = "0.26.0", default-features = false, features = ["legacy"] }
index 21b6a4e97c15d0e3261251db829dc1c38e7c9c92..7bd4b4aee3f0784d5ad5aa92c7638e49b08790aa 100644 (file)
@@ -58,9 +58,8 @@
 //! [`ChainMonitor`]: lightning::chain::chainmonitor::ChainMonitor
 //! [`ChannelManager`]: lightning::ln::channelmanager::ChannelManager
 
-// Prefix these with `rustdoc::` when we update our MSRV to be >= 1.52 to remove warnings.
-#![deny(broken_intra_doc_links)]
-#![deny(private_intra_doc_links)]
+#![deny(rustdoc::broken_intra_doc_links)]
+#![deny(rustdoc::private_intra_doc_links)]
 
 #![deny(missing_docs)]
 #![deny(unsafe_code)]
index 76d751af39192754c67575bdc27650ab89aa88e3..3c3912e2b32fc29a130b9d78d504a6826b17cb54 100644 (file)
@@ -9,7 +9,7 @@ A Bitcoin Lightning library in Rust.
 Does most of the hard work, without implying a specific runtime, requiring clients implement basic network logic, chain interactions and disk storage.
 Still missing tons of error-handling. See GitHub issues for suggested projects if you want to contribute. Don't have to bother telling you not to use this for anything serious, because you'd have to build a client around it to even try.
 """
-edition = "2018"
+edition = "2021"
 
 [package.metadata.docs.rs]
 features = ["std"]
index 2d7f0c18af395b12eff9fa87863c77e57c9567e1..1f42dc2fe4251a26be3c7b659bae89505893b6e3 100644 (file)
@@ -40,7 +40,7 @@ pub trait BroadcasterInterface {
        /// be sure to manage both cases correctly.
        ///
        /// Bitcoin transaction packages are defined in BIP 331 and here:
-       /// https://github.com/bitcoin/bitcoin/blob/master/doc/policy/packages.md
+       /// <https://github.com/bitcoin/bitcoin/blob/master/doc/policy/packages.md>
        fn broadcast_transactions(&self, txs: &[&Transaction]);
 }
 
index 1fe3fcd9ff11a8046de0a64f2e95ae7968fdb662..09a32b6e2145e79a16121819f3112a31335ae5ca 100644 (file)
@@ -402,7 +402,8 @@ where C::Target: chain::Filter,
                                                outpoint: OutPoint { txid, index: idx as u16 },
                                                script_pubkey: output.script_pubkey,
                                        };
-                                       chain_source.register_output(output)
+                                       log_trace!(logger, "Adding monitoring for spends of outpoint {} to the filter", output.outpoint);
+                                       chain_source.register_output(output);
                                }
                        }
                }
@@ -741,7 +742,7 @@ where C::Target: chain::Filter,
                        },
                }
                if let Some(ref chain_source) = self.chain_source {
-                       monitor.load_outputs_to_watch(chain_source);
+                       monitor.load_outputs_to_watch(chain_source, &self.logger);
                }
                entry.insert(MonitorHolder {
                        monitor,
index ce1ef9128f91efe8327f2f874daca511e7c6e4f1..244384faa224f68bafd5df1b02cda326ec722493 100644 (file)
@@ -1382,15 +1382,22 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitor<Signer> {
        /// Loads the funding txo and outputs to watch into the given `chain::Filter` by repeatedly
        /// calling `chain::Filter::register_output` and `chain::Filter::register_tx` until all outputs
        /// have been registered.
-       pub fn load_outputs_to_watch<F: Deref>(&self, filter: &F) where F::Target: chain::Filter {
+       pub fn load_outputs_to_watch<F: Deref, L: Deref>(&self, filter: &F, logger: &L)
+       where
+               F::Target: chain::Filter, L::Target: Logger,
+       {
                let lock = self.inner.lock().unwrap();
+               let logger = WithChannelMonitor::from_impl(logger, &*lock);
+               log_trace!(logger, "Registering funding outpoint {}", &lock.get_funding_txo().0);
                filter.register_tx(&lock.get_funding_txo().0.txid, &lock.get_funding_txo().1);
                for (txid, outputs) in lock.get_outputs_to_watch().iter() {
                        for (index, script_pubkey) in outputs.iter() {
                                assert!(*index <= u16::max_value() as u32);
+                               let outpoint = OutPoint { txid: *txid, index: *index as u16 };
+                               log_trace!(logger, "Registering outpoint {} with the filter for monitoring spends", outpoint);
                                filter.register_output(WatchedOutput {
                                        block_hash: None,
-                                       outpoint: OutPoint { txid: *txid, index: *index as u16 },
+                                       outpoint,
                                        script_pubkey: script_pubkey.clone(),
                                });
                        }
@@ -2284,7 +2291,7 @@ macro_rules! fail_unbroadcast_htlcs {
                                                        // broadcastable commitment transaction has the HTLC in it, but it
                                                        // cannot currently change after channel initialization, so we don't
                                                        // need to here.
-                                                       let confirmed_htlcs_iter: &mut Iterator<Item = (&HTLCOutputInCommitment, Option<&HTLCSource>)> = &mut $confirmed_htlcs_list;
+                                                       let confirmed_htlcs_iter: &mut dyn Iterator<Item = (&HTLCOutputInCommitment, Option<&HTLCSource>)> = &mut $confirmed_htlcs_list;
 
                                                        let mut matched_htlc = false;
                                                        for (ref broadcast_htlc, ref broadcast_source) in confirmed_htlcs_iter {
@@ -2659,18 +2666,59 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
                }
        }
 
-       fn broadcast_latest_holder_commitment_txn<B: Deref, L: Deref>(&mut self, broadcaster: &B, logger: &WithChannelMonitor<L>)
-               where B::Target: BroadcasterInterface,
-                       L::Target: Logger,
-       {
-               let commit_txs = self.get_latest_holder_commitment_txn(logger);
-               let mut txs = vec![];
-               for tx in commit_txs.iter() {
-                       log_info!(logger, "Broadcasting local {}", log_tx!(tx));
-                       txs.push(tx);
-               }
-               broadcaster.broadcast_transactions(&txs);
+       fn generate_claimable_outpoints_and_watch_outputs(&mut self) -> (Vec<PackageTemplate>, Vec<TransactionOutputs>) {
+               let funding_outp = HolderFundingOutput::build(
+                       self.funding_redeemscript.clone(),
+                       self.channel_value_satoshis,
+                       self.onchain_tx_handler.channel_type_features().clone()
+               );
+               let commitment_package = PackageTemplate::build_package(
+                       self.funding_info.0.txid.clone(), self.funding_info.0.index as u32,
+                       PackageSolvingData::HolderFundingOutput(funding_outp),
+                       self.best_block.height(), self.best_block.height()
+               );
+               let mut claimable_outpoints = vec![commitment_package];
                self.pending_monitor_events.push(MonitorEvent::HolderForceClosed(self.funding_info.0));
+               // Although we aren't signing the transaction directly here, the transaction will be signed
+               // in the claim that is queued to OnchainTxHandler. We set holder_tx_signed here to reject
+               // new channel updates.
+               self.holder_tx_signed = true;
+               let mut watch_outputs = Vec::new();
+               // We can't broadcast our HTLC transactions while the commitment transaction is
+               // unconfirmed. We'll delay doing so until we detect the confirmed commitment in
+               // `transactions_confirmed`.
+               if !self.onchain_tx_handler.channel_type_features().supports_anchors_zero_fee_htlc_tx() {
+                       // Because we're broadcasting a commitment transaction, we should construct the package
+                       // assuming it gets confirmed in the next block. Sadly, we have code which considers
+                       // "not yet confirmed" things as discardable, so we cannot do that here.
+                       let (mut new_outpoints, _) = self.get_broadcasted_holder_claims(
+                               &self.current_holder_commitment_tx, self.best_block.height()
+                       );
+                       let unsigned_commitment_tx = self.onchain_tx_handler.get_unsigned_holder_commitment_tx();
+                       let new_outputs = self.get_broadcasted_holder_watch_outputs(
+                               &self.current_holder_commitment_tx, &unsigned_commitment_tx
+                       );
+                       if !new_outputs.is_empty() {
+                               watch_outputs.push((self.current_holder_commitment_tx.txid.clone(), new_outputs));
+                       }
+                       claimable_outpoints.append(&mut new_outpoints);
+               }
+               (claimable_outpoints, watch_outputs)
+       }
+
+       pub(crate) fn queue_latest_holder_commitment_txn_for_broadcast<B: Deref, F: Deref, L: Deref>(
+               &mut self, broadcaster: &B, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &WithChannelMonitor<L>
+       )
+       where
+               B::Target: BroadcasterInterface,
+               F::Target: FeeEstimator,
+               L::Target: Logger,
+       {
+               let (claimable_outpoints, _) = self.generate_claimable_outpoints_and_watch_outputs();
+               self.onchain_tx_handler.update_claims_view_from_requests(
+                       claimable_outpoints, self.best_block.height(), self.best_block.height(), broadcaster,
+                       fee_estimator, logger
+               );
        }
 
        fn update_monitor<B: Deref, F: Deref, L: Deref>(
@@ -2760,26 +2808,7 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
                                                        log_trace!(logger, "Avoiding commitment broadcast, already detected confirmed spend onchain");
                                                        continue;
                                                }
-                                               self.broadcast_latest_holder_commitment_txn(broadcaster, logger);
-                                               // If the channel supports anchor outputs, we'll need to emit an external
-                                               // event to be consumed such that a child transaction is broadcast with a
-                                               // high enough feerate for the parent commitment transaction to confirm.
-                                               if self.onchain_tx_handler.channel_type_features().supports_anchors_zero_fee_htlc_tx() {
-                                                       let funding_output = HolderFundingOutput::build(
-                                                               self.funding_redeemscript.clone(), self.channel_value_satoshis,
-                                                               self.onchain_tx_handler.channel_type_features().clone(),
-                                                       );
-                                                       let best_block_height = self.best_block.height();
-                                                       let commitment_package = PackageTemplate::build_package(
-                                                               self.funding_info.0.txid.clone(), self.funding_info.0.index as u32,
-                                                               PackageSolvingData::HolderFundingOutput(funding_output),
-                                                               best_block_height, best_block_height
-                                                       );
-                                                       self.onchain_tx_handler.update_claims_view_from_requests(
-                                                               vec![commitment_package], best_block_height, best_block_height,
-                                                               broadcaster, &bounded_fee_estimator, logger,
-                                                       );
-                                               }
+                                               self.queue_latest_holder_commitment_txn_for_broadcast(broadcaster, &bounded_fee_estimator, logger);
                                        } else if !self.holder_tx_signed {
                                                log_error!(logger, "WARNING: You have a potentially-unsafe holder commitment transaction available to broadcast");
                                                log_error!(logger, "    in channel monitor for channel {}!", &self.funding_info.0.to_channel_id());
@@ -3356,6 +3385,58 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
                }
        }
 
+       /// Cancels any existing pending claims for a commitment that previously confirmed and has now
+       /// been replaced by another.
+       pub fn cancel_prev_commitment_claims<L: Deref>(
+               &mut self, logger: &L, confirmed_commitment_txid: &Txid
+       ) where L::Target: Logger {
+               for (counterparty_commitment_txid, _) in &self.counterparty_commitment_txn_on_chain {
+                       // Cancel any pending claims for counterparty commitments we've seen confirm.
+                       if counterparty_commitment_txid == confirmed_commitment_txid {
+                               continue;
+                       }
+                       for (htlc, _) in self.counterparty_claimable_outpoints.get(counterparty_commitment_txid).unwrap_or(&vec![]) {
+                               log_trace!(logger, "Canceling claims for previously confirmed counterparty commitment {}",
+                                       counterparty_commitment_txid);
+                               let mut outpoint = BitcoinOutPoint { txid: *counterparty_commitment_txid, vout: 0 };
+                               if let Some(vout) = htlc.transaction_output_index {
+                                       outpoint.vout = vout;
+                                       self.onchain_tx_handler.abandon_claim(&outpoint);
+                               }
+                       }
+               }
+               if self.holder_tx_signed {
+                       // If we've signed, we may have broadcast either commitment (prev or current), and
+                       // attempted to claim from it immediately without waiting for a confirmation.
+                       if self.current_holder_commitment_tx.txid != *confirmed_commitment_txid {
+                               log_trace!(logger, "Canceling claims for previously broadcast holder commitment {}",
+                                       self.current_holder_commitment_tx.txid);
+                               let mut outpoint = BitcoinOutPoint { txid: self.current_holder_commitment_tx.txid, vout: 0 };
+                               for (htlc, _, _) in &self.current_holder_commitment_tx.htlc_outputs {
+                                       if let Some(vout) = htlc.transaction_output_index {
+                                               outpoint.vout = vout;
+                                               self.onchain_tx_handler.abandon_claim(&outpoint);
+                                       }
+                               }
+                       }
+                       if let Some(prev_holder_commitment_tx) = &self.prev_holder_signed_commitment_tx {
+                               if prev_holder_commitment_tx.txid != *confirmed_commitment_txid {
+                                       log_trace!(logger, "Canceling claims for previously broadcast holder commitment {}",
+                                               prev_holder_commitment_tx.txid);
+                                       let mut outpoint = BitcoinOutPoint { txid: prev_holder_commitment_tx.txid, vout: 0 };
+                                       for (htlc, _, _) in &prev_holder_commitment_tx.htlc_outputs {
+                                               if let Some(vout) = htlc.transaction_output_index {
+                                                       outpoint.vout = vout;
+                                                       self.onchain_tx_handler.abandon_claim(&outpoint);
+                                               }
+                                       }
+                               }
+                       }
+               } else {
+                       // We never signed a holder commitment, so there are no holder claims to cancel.
+               }
+       }
+
        fn get_latest_holder_commitment_txn<L: Deref>(
                &mut self, logger: &WithChannelMonitor<L>,
        ) -> Vec<Transaction> where L::Target: Logger {
@@ -3458,9 +3539,11 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
 
                if height > self.best_block.height() {
                        self.best_block = BestBlock::new(block_hash, height);
+                       log_trace!(logger, "Connecting new block {} at height {}", block_hash, height);
                        self.block_confirmed(height, block_hash, vec![], vec![], vec![], &broadcaster, &fee_estimator, logger)
                } else if block_hash != self.best_block.block_hash() {
                        self.best_block = BestBlock::new(block_hash, height);
+                       log_trace!(logger, "Best block re-orged, replaced with new block {} at height {}", block_hash, height);
                        self.onchain_events_awaiting_threshold_conf.retain(|ref entry| entry.height <= height);
                        self.onchain_tx_handler.block_disconnected(height + 1, broadcaster, fee_estimator, logger);
                        Vec::new()
@@ -3497,6 +3580,7 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
                let mut claimable_outpoints = Vec::new();
                'tx_iter: for tx in &txn_matched {
                        let txid = tx.txid();
+                       log_trace!(logger, "Transaction {} confirmed in block {}", txid, block_hash);
                        // If a transaction has already been confirmed, ensure we don't bother processing it duplicatively.
                        if Some(txid) == self.funding_spend_confirmed {
                                log_debug!(logger, "Skipping redundant processing of funding-spend tx {} as it was previously confirmed", txid);
@@ -3568,6 +3652,10 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
                                                        commitment_tx_to_counterparty_output,
                                                },
                                        });
+                                       // Now that we've detected a confirmed commitment transaction, attempt to cancel
+                                       // pending claims for any commitments that were previously confirmed such that
+                                       // we don't continue claiming inputs that no longer exist.
+                                       self.cancel_prev_commitment_claims(&logger, &txid);
                                }
                        }
                        if tx.input.len() >= 1 {
@@ -3633,29 +3721,9 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
 
                let should_broadcast = self.should_broadcast_holder_commitment_txn(logger);
                if should_broadcast {
-                       let funding_outp = HolderFundingOutput::build(self.funding_redeemscript.clone(), self.channel_value_satoshis, self.onchain_tx_handler.channel_type_features().clone());
-                       let commitment_package = PackageTemplate::build_package(self.funding_info.0.txid.clone(), self.funding_info.0.index as u32, PackageSolvingData::HolderFundingOutput(funding_outp), self.best_block.height(), self.best_block.height());
-                       claimable_outpoints.push(commitment_package);
-                       self.pending_monitor_events.push(MonitorEvent::HolderForceClosed(self.funding_info.0));
-                       // Although we aren't signing the transaction directly here, the transaction will be signed
-                       // in the claim that is queued to OnchainTxHandler. We set holder_tx_signed here to reject
-                       // new channel updates.
-                       self.holder_tx_signed = true;
-                       // We can't broadcast our HTLC transactions while the commitment transaction is
-                       // unconfirmed. We'll delay doing so until we detect the confirmed commitment in
-                       // `transactions_confirmed`.
-                       if !self.onchain_tx_handler.channel_type_features().supports_anchors_zero_fee_htlc_tx() {
-                               // Because we're broadcasting a commitment transaction, we should construct the package
-                               // assuming it gets confirmed in the next block. Sadly, we have code which considers
-                               // "not yet confirmed" things as discardable, so we cannot do that here.
-                               let (mut new_outpoints, _) = self.get_broadcasted_holder_claims(&self.current_holder_commitment_tx, self.best_block.height());
-                               let unsigned_commitment_tx = self.onchain_tx_handler.get_unsigned_holder_commitment_tx();
-                               let new_outputs = self.get_broadcasted_holder_watch_outputs(&self.current_holder_commitment_tx, &unsigned_commitment_tx);
-                               if !new_outputs.is_empty() {
-                                       watch_outputs.push((self.current_holder_commitment_tx.txid.clone(), new_outputs));
-                               }
-                               claimable_outpoints.append(&mut new_outpoints);
-                       }
+                       let (mut new_outpoints, mut new_outputs) = self.generate_claimable_outpoints_and_watch_outputs();
+                       claimable_outpoints.append(&mut new_outpoints);
+                       watch_outputs.append(&mut new_outputs);
                }
 
                // Find which on-chain events have reached their confirmation threshold.
index 2a9583f2edfcb26a17667839894f056a657a79a2..dafce03ddb0d76ca1ca7f828f71b59c557345bab 100644 (file)
@@ -357,7 +357,7 @@ pub struct WatchedOutput {
        pub script_pubkey: ScriptBuf,
 }
 
-impl<T: Listen> Listen for core::ops::Deref<Target = T> {
+impl<T: Listen> Listen for dyn core::ops::Deref<Target = T> {
        fn filtered_block_connected(&self, header: &Header, txdata: &TransactionData, height: u32) {
                (**self).filtered_block_connected(header, txdata, height);
        }
index bbed782bb57bd0a2e6da0fc4e9ca65ddadde6d5f..59c98f05ebc4018f5915165d05e7be8facc697b9 100644 (file)
@@ -676,6 +676,25 @@ impl<ChannelSigner: WriteableEcdsaChannelSigner> OnchainTxHandler<ChannelSigner>
                None
        }
 
+       pub fn abandon_claim(&mut self, outpoint: &BitcoinOutPoint) {
+               let claim_id = self.claimable_outpoints.get(outpoint).map(|(claim_id, _)| *claim_id)
+                       .or_else(|| {
+                               self.pending_claim_requests.iter()
+                                       .find(|(_, claim)| claim.outpoints().iter().any(|claim_outpoint| *claim_outpoint == outpoint))
+                                       .map(|(claim_id, _)| *claim_id)
+                       });
+               if let Some(claim_id) = claim_id {
+                       if let Some(claim) = self.pending_claim_requests.remove(&claim_id) {
+                               for outpoint in claim.outpoints() {
+                                       self.claimable_outpoints.remove(&outpoint);
+                               }
+                       }
+               } else {
+                       self.locktimed_packages.values_mut().for_each(|claims|
+                               claims.retain(|claim| !claim.outpoints().iter().any(|claim_outpoint| *claim_outpoint == outpoint)));
+               }
+       }
+
        /// Upon channelmonitor.block_connected(..) or upon provision of a preimage on the forward link
        /// for this channel, provide new relevant on-chain transactions and/or new claim requests.
        /// Together with `update_claims_view_from_matched_txn` this used to be named
index 8c6390302f10c945754792189f0b02f4421fd616..1b019ba349e0ae8687a190b92ebd144fe164f6f9 100644 (file)
@@ -35,6 +35,7 @@ use bitcoin::{OutPoint, PubkeyHash, Sequence, ScriptBuf, Transaction, TxIn, TxOu
 use bitcoin::blockdata::constants::WITNESS_SCALE_FACTOR;
 use bitcoin::blockdata::locktime::absolute::LockTime;
 use bitcoin::consensus::Encodable;
+use bitcoin::psbt::PartiallySignedTransaction;
 use bitcoin::secp256k1;
 use bitcoin::secp256k1::Secp256k1;
 use bitcoin::secp256k1::ecdsa::Signature;
@@ -343,7 +344,10 @@ pub trait CoinSelectionSource {
        ) -> Result<CoinSelection, ()>;
        /// Signs and provides the full witness for all inputs within the transaction known to the
        /// trait (i.e., any provided via [`CoinSelectionSource::select_confirmed_utxos`]).
-       fn sign_tx(&self, tx: Transaction) -> Result<Transaction, ()>;
+       ///
+       /// If your wallet does not support signing PSBTs you can call `psbt.extract_tx()` to get the
+       /// unsigned transaction and then sign it with your wallet.
+       fn sign_psbt(&self, psbt: PartiallySignedTransaction) -> Result<Transaction, ()>;
 }
 
 /// An alternative to [`CoinSelectionSource`] that can be implemented and used along [`Wallet`] to
@@ -357,7 +361,10 @@ pub trait WalletSource {
        /// Signs and provides the full [`TxIn::script_sig`] and [`TxIn::witness`] for all inputs within
        /// the transaction known to the wallet (i.e., any provided via
        /// [`WalletSource::list_confirmed_utxos`]).
-       fn sign_tx(&self, tx: Transaction) -> Result<Transaction, ()>;
+       ///
+       /// If your wallet does not support signing PSBTs you can call `psbt.extract_tx()` to get the
+       /// unsigned transaction and then sign it with your wallet.
+       fn sign_psbt(&self, psbt: PartiallySignedTransaction) -> Result<Transaction, ()>;
 }
 
 /// A wrapper over [`WalletSource`] that implements [`CoinSelection`] by preferring UTXOs that would
@@ -504,8 +511,8 @@ where
                        .or_else(|_| do_coin_selection(true, true))
        }
 
-       fn sign_tx(&self, tx: Transaction) -> Result<Transaction, ()> {
-               self.source.sign_tx(tx)
+       fn sign_psbt(&self, psbt: PartiallySignedTransaction) -> Result<Transaction, ()> {
+               self.source.sign_psbt(psbt)
        }
 }
 
@@ -549,8 +556,8 @@ where
        }
 
        /// Updates a transaction with the result of a successful coin selection attempt.
-       fn process_coin_selection(&self, tx: &mut Transaction, mut coin_selection: CoinSelection) {
-               for utxo in coin_selection.confirmed_utxos.drain(..) {
+       fn process_coin_selection(&self, tx: &mut Transaction, coin_selection: &CoinSelection) {
+               for utxo in coin_selection.confirmed_utxos.iter() {
                        tx.input.push(TxIn {
                                previous_output: utxo.outpoint,
                                script_sig: ScriptBuf::new(),
@@ -558,7 +565,7 @@ where
                                witness: Witness::new(),
                        });
                }
-               if let Some(change_output) = coin_selection.change_output.take() {
+               if let Some(change_output) = coin_selection.change_output.clone() {
                        tx.output.push(change_output);
                } else if tx.output.is_empty() {
                        // We weren't provided a change output, likely because the input set was a perfect
@@ -595,7 +602,7 @@ where
 
                log_debug!(self.logger, "Peforming coin selection for commitment package (commitment and anchor transaction) targeting {} sat/kW",
                        package_target_feerate_sat_per_1000_weight);
-               let coin_selection = self.utxo_source.select_confirmed_utxos(
+               let coin_selection: CoinSelection = self.utxo_source.select_confirmed_utxos(
                        claim_id, must_spend, &[], package_target_feerate_sat_per_1000_weight,
                )?;
 
@@ -613,15 +620,29 @@ where
                let total_input_amount = must_spend_amount +
                        coin_selection.confirmed_utxos.iter().map(|utxo| utxo.output.value).sum::<u64>();
 
-               self.process_coin_selection(&mut anchor_tx, coin_selection);
+               self.process_coin_selection(&mut anchor_tx, &coin_selection);
                let anchor_txid = anchor_tx.txid();
 
-               debug_assert_eq!(anchor_tx.output.len(), 1);
+               // construct psbt
+               let mut anchor_psbt = PartiallySignedTransaction::from_unsigned_tx(anchor_tx).unwrap();
+               // add witness_utxo to anchor input
+               anchor_psbt.inputs[0].witness_utxo = Some(anchor_descriptor.previous_utxo());
+               // add witness_utxo to remaining inputs
+               for (idx, utxo) in coin_selection.confirmed_utxos.into_iter().enumerate() {
+                       // add 1 to skip the anchor input
+                       let index = idx + 1;
+                       debug_assert_eq!(anchor_psbt.unsigned_tx.input[index].previous_output, utxo.outpoint);
+                       if utxo.output.script_pubkey.is_witness_program() {
+                               anchor_psbt.inputs[index].witness_utxo = Some(utxo.output);
+                       }
+               }
+
+               debug_assert_eq!(anchor_psbt.unsigned_tx.output.len(), 1);
                #[cfg(debug_assertions)]
-               let unsigned_tx_weight = anchor_tx.weight().to_wu() - (anchor_tx.input.len() as u64 * EMPTY_SCRIPT_SIG_WEIGHT);
+               let unsigned_tx_weight = anchor_psbt.unsigned_tx.weight().to_wu() - (anchor_psbt.unsigned_tx.input.len() as u64 * EMPTY_SCRIPT_SIG_WEIGHT);
 
                log_debug!(self.logger, "Signing anchor transaction {}", anchor_txid);
-               anchor_tx = self.utxo_source.sign_tx(anchor_tx)?;
+               anchor_tx = self.utxo_source.sign_psbt(anchor_psbt)?;
 
                let signer = anchor_descriptor.derive_channel_signer(&self.signer_provider);
                let anchor_sig = signer.sign_holder_anchor_input(&anchor_tx, 0, &self.secp)?;
@@ -690,7 +711,7 @@ where
                #[cfg(debug_assertions)]
                let must_spend_amount = must_spend.iter().map(|input| input.previous_utxo.value).sum::<u64>();
 
-               let coin_selection = self.utxo_source.select_confirmed_utxos(
+               let coin_selection: CoinSelection = self.utxo_source.select_confirmed_utxos(
                        claim_id, must_spend, &htlc_tx.output, target_feerate_sat_per_1000_weight,
                )?;
 
@@ -701,13 +722,30 @@ where
                let total_input_amount = must_spend_amount +
                        coin_selection.confirmed_utxos.iter().map(|utxo| utxo.output.value).sum::<u64>();
 
-               self.process_coin_selection(&mut htlc_tx, coin_selection);
+               self.process_coin_selection(&mut htlc_tx, &coin_selection);
+
+               // construct psbt
+               let mut htlc_psbt = PartiallySignedTransaction::from_unsigned_tx(htlc_tx).unwrap();
+               // add witness_utxo to htlc inputs
+               for (i, htlc_descriptor) in htlc_descriptors.iter().enumerate() {
+                       debug_assert_eq!(htlc_psbt.unsigned_tx.input[i].previous_output, htlc_descriptor.outpoint());
+                       htlc_psbt.inputs[i].witness_utxo = Some(htlc_descriptor.previous_utxo(&self.secp));
+               }
+               // add witness_utxo to remaining inputs
+               for (idx, utxo) in coin_selection.confirmed_utxos.into_iter().enumerate() {
+                       // offset to skip the htlc inputs
+                       let index = idx + htlc_descriptors.len();
+                       debug_assert_eq!(htlc_psbt.unsigned_tx.input[index].previous_output, utxo.outpoint);
+                       if utxo.output.script_pubkey.is_witness_program() {
+                               htlc_psbt.inputs[index].witness_utxo = Some(utxo.output);
+                       }
+               }
 
                #[cfg(debug_assertions)]
-               let unsigned_tx_weight = htlc_tx.weight().to_wu() - (htlc_tx.input.len() as u64 * EMPTY_SCRIPT_SIG_WEIGHT);
+               let unsigned_tx_weight = htlc_psbt.unsigned_tx.weight().to_wu() - (htlc_psbt.unsigned_tx.input.len() as u64 * EMPTY_SCRIPT_SIG_WEIGHT);
 
-               log_debug!(self.logger, "Signing HTLC transaction {}", htlc_tx.txid());
-               htlc_tx = self.utxo_source.sign_tx(htlc_tx)?;
+               log_debug!(self.logger, "Signing HTLC transaction {}", htlc_psbt.unsigned_tx.txid());
+               htlc_tx = self.utxo_source.sign_psbt(htlc_psbt)?;
 
                let mut signers = BTreeMap::new();
                for (idx, htlc_descriptor) in htlc_descriptors.iter().enumerate() {
index df1bb1a2a2f2ef807f55e72f9bb50f96380e17c7..c42f66da5764ec95c656a9b72e6fe10c6c66d771 100644 (file)
@@ -40,9 +40,8 @@
 #![cfg_attr(not(any(test, fuzzing, feature = "_test_utils")), deny(missing_docs))]
 #![cfg_attr(not(any(test, feature = "_test_utils")), forbid(unsafe_code))]
 
-// Prefix these with `rustdoc::` when we update our MSRV to be >= 1.52 to remove warnings.
-#![deny(broken_intra_doc_links)]
-#![deny(private_intra_doc_links)]
+#![deny(rustdoc::broken_intra_doc_links)]
+#![deny(rustdoc::private_intra_doc_links)]
 
 // In general, rust is absolutely horrid at supporting users doing things like,
 // for example, compiling Rust code for real environments. Disable useless lints
index 19e4b966658e4c73283468035ed050782d61745b..375beb6d66cc0ce20fd30e0325a5ced8a8798bcf 100644 (file)
@@ -5471,7 +5471,7 @@ impl<SP: Deref> Channel<SP> where
                        // larger. If we don't know that time has moved forward, we can just set it to the last
                        // time we saw and it will be ignored.
                        let best_time = self.context.update_time_counter;
-                       match self.do_best_block_updated(reorg_height, best_time, None::<(ChainHash, &&NodeSigner, &UserConfig)>, logger) {
+                       match self.do_best_block_updated(reorg_height, best_time, None::<(ChainHash, &&dyn NodeSigner, &UserConfig)>, logger) {
                                Ok((channel_ready, timed_out_htlcs, announcement_sigs)) => {
                                        assert!(channel_ready.is_none(), "We can't generate a funding with 0 confirmations?");
                                        assert!(timed_out_htlcs.is_empty(), "We can't have accepted HTLCs with a timeout before our funding confirmation?");
@@ -8931,7 +8931,7 @@ mod tests {
                assert_eq!(decoded_chan.context.holding_cell_htlc_updates, holding_cell_htlc_updates);
        }
 
-       #[cfg(feature = "_test_vectors")]
+       #[cfg(all(feature = "_test_vectors", not(feature = "grind_signatures")))]
        #[test]
        fn outbound_commitment_test() {
                use bitcoin::sighash;
@@ -8952,7 +8952,7 @@ mod tests {
 
                // Test vectors from BOLT 3 Appendices C and F (anchors):
                let feeest = TestFeeEstimator{fee_est: 15000};
-               let logger : Arc<Logger> = Arc::new(test_utils::TestLogger::new());
+               let logger : Arc<dyn Logger> = Arc::new(test_utils::TestLogger::new());
                let secp_ctx = Secp256k1::new();
 
                let mut signer = InMemorySigner::new(
index da5f4394a4e258d3cdab3224214a70646f10f813..f08096426ff7c9bef04629064e02b27082c0c738 100644 (file)
@@ -1532,13 +1532,11 @@ pub const MIN_FINAL_CLTV_EXPIRY_DELTA: u16 = HTLC_FAIL_BACK_BUFFER as u16 + 3;
 // then waiting ANTI_REORG_DELAY to be reorg-safe on the outbound HTLC and
 // failing the corresponding htlc backward, and us now seeing the last block of ANTI_REORG_DELAY before
 // LATENCY_GRACE_PERIOD_BLOCKS.
-#[deny(const_err)]
 #[allow(dead_code)]
 const CHECK_CLTV_EXPIRY_SANITY: u32 = MIN_CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS - CLTV_CLAIM_BUFFER - ANTI_REORG_DELAY - LATENCY_GRACE_PERIOD_BLOCKS;
 
 // Check for ability of an attacker to make us fail on-chain by delaying an HTLC claim. See
 // ChannelMonitor::should_broadcast_holder_commitment_txn for a description of why this is needed.
-#[deny(const_err)]
 #[allow(dead_code)]
 const CHECK_CLTV_EXPIRY_SANITY_2: u32 = MIN_CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS - 2*CLTV_CLAIM_BUFFER;
 
@@ -10624,7 +10622,6 @@ where
                        // 0.0.102+
                        for (_, monitor) in args.channel_monitors.iter() {
                                let counterparty_opt = id_to_peer.get(&monitor.get_funding_txo().0.to_channel_id());
-                               let chan_id = monitor.get_funding_txo().0.to_channel_id();
                                if counterparty_opt.is_none() {
                                        let logger = WithChannelMonitor::from(&args.logger, monitor);
                                        for (htlc_source, (htlc, _)) in monitor.get_pending_or_resolved_outbound_htlcs() {
index fee3a3b399475d885ee1ac7996f9f246c771eb81..5d8cc59978eb1d8f01379b47be6be41900aa8cd1 100644 (file)
@@ -2273,9 +2273,15 @@ fn channel_monitor_network_test() {
        nodes[1].node.force_close_broadcasting_latest_txn(&chan_1.2, &nodes[0].node.get_our_node_id()).unwrap();
        check_added_monitors!(nodes[1], 1);
        check_closed_broadcast!(nodes[1], true);
+       check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 100000);
        {
                let mut node_txn = test_txn_broadcast(&nodes[1], &chan_1, None, HTLCType::NONE);
                assert_eq!(node_txn.len(), 1);
+               mine_transaction(&nodes[1], &node_txn[0]);
+               if nodes[1].connect_style.borrow().updates_best_block_first() {
+                       let _ = nodes[1].tx_broadcaster.txn_broadcast();
+               }
+
                mine_transaction(&nodes[0], &node_txn[0]);
                check_added_monitors!(nodes[0], 1);
                test_txn_broadcast(&nodes[0], &chan_1, Some(node_txn[0].clone()), HTLCType::NONE);
@@ -2284,7 +2290,6 @@ fn channel_monitor_network_test() {
        assert_eq!(nodes[0].node.list_channels().len(), 0);
        assert_eq!(nodes[1].node.list_channels().len(), 1);
        check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
-       check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 100000);
 
        // One pending HTLC is discarded by the force-close:
        let (payment_preimage_1, payment_hash_1, ..) = route_payment(&nodes[1], &[&nodes[2], &nodes[3]], 3_000_000);
@@ -3556,7 +3561,7 @@ fn test_htlc_ignore_latest_remote_commitment() {
                // connect_style.
                return;
        }
-       create_announced_chan_between_nodes(&nodes, 0, 1);
+       let funding_tx = create_announced_chan_between_nodes(&nodes, 0, 1).3;
 
        route_payment(&nodes[0], &[&nodes[1]], 10000000);
        nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
@@ -3565,11 +3570,12 @@ fn test_htlc_ignore_latest_remote_commitment() {
        check_added_monitors!(nodes[0], 1);
        check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
 
-       let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
-       assert_eq!(node_txn.len(), 3);
-       assert_eq!(node_txn[0].txid(), node_txn[1].txid());
+       let node_txn = nodes[0].tx_broadcaster.unique_txn_broadcast();
+       assert_eq!(node_txn.len(), 2);
+       check_spends!(node_txn[0], funding_tx);
+       check_spends!(node_txn[1], node_txn[0]);
 
-       let block = create_dummy_block(nodes[1].best_block_hash(), 42, vec![node_txn[0].clone(), node_txn[1].clone()]);
+       let block = create_dummy_block(nodes[1].best_block_hash(), 42, vec![node_txn[0].clone()]);
        connect_block(&nodes[1], &block);
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
@@ -3626,7 +3632,7 @@ fn test_force_close_fail_back() {
        check_closed_broadcast!(nodes[2], true);
        check_added_monitors!(nodes[2], 1);
        check_closed_event!(nodes[2], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
-       let tx = {
+       let commitment_tx = {
                let mut node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap();
                // Note that we don't bother broadcasting the HTLC-Success transaction here as we don't
                // have a use for it unless nodes[2] learns the preimage somehow, the funds will go
@@ -3635,7 +3641,7 @@ fn test_force_close_fail_back() {
                node_txn.remove(0)
        };
 
-       mine_transaction(&nodes[1], &tx);
+       mine_transaction(&nodes[1], &commitment_tx);
 
        // Note no UpdateHTLCs event here from nodes[1] to nodes[0]!
        check_closed_broadcast!(nodes[1], true);
@@ -3647,15 +3653,16 @@ fn test_force_close_fail_back() {
                get_monitor!(nodes[2], payment_event.commitment_msg.channel_id)
                        .provide_payment_preimage(&our_payment_hash, &our_payment_preimage, &node_cfgs[2].tx_broadcaster, &LowerBoundedFeeEstimator::new(node_cfgs[2].fee_estimator), &node_cfgs[2].logger);
        }
-       mine_transaction(&nodes[2], &tx);
-       let node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap();
-       assert_eq!(node_txn.len(), 1);
-       assert_eq!(node_txn[0].input.len(), 1);
-       assert_eq!(node_txn[0].input[0].previous_output.txid, tx.txid());
-       assert_eq!(node_txn[0].lock_time, LockTime::ZERO); // Must be an HTLC-Success
-       assert_eq!(node_txn[0].input[0].witness.len(), 5); // Must be an HTLC-Success
+       mine_transaction(&nodes[2], &commitment_tx);
+       let mut node_txn = nodes[2].tx_broadcaster.txn_broadcast();
+       assert_eq!(node_txn.len(), if nodes[2].connect_style.borrow().updates_best_block_first() { 2 } else { 1 });
+       let htlc_tx = node_txn.pop().unwrap();
+       assert_eq!(htlc_tx.input.len(), 1);
+       assert_eq!(htlc_tx.input[0].previous_output.txid, commitment_tx.txid());
+       assert_eq!(htlc_tx.lock_time, LockTime::ZERO); // Must be an HTLC-Success
+       assert_eq!(htlc_tx.input[0].witness.len(), 5); // Must be an HTLC-Success
 
-       check_spends!(node_txn[0], tx);
+       check_spends!(htlc_tx, commitment_tx);
 }
 
 #[test]
@@ -8568,10 +8575,11 @@ fn test_concurrent_monitor_claim() {
        watchtower_alice.chain_monitor.block_connected(&block, HTLC_TIMEOUT_BROADCAST);
 
        // Watchtower Alice should have broadcast a commitment/HTLC-timeout
-       let alice_state = {
+       {
                let mut txn = alice_broadcaster.txn_broadcast();
                assert_eq!(txn.len(), 2);
-               txn.remove(0)
+               check_spends!(txn[0], chan_1.3);
+               check_spends!(txn[1], txn[0]);
        };
 
        // Copy ChainMonitor to simulate watchtower Bob and make it receive a commitment update first.
@@ -8640,11 +8648,8 @@ fn test_concurrent_monitor_claim() {
        check_added_monitors(&nodes[0], 1);
        {
                let htlc_txn = alice_broadcaster.txn_broadcast();
-               assert_eq!(htlc_txn.len(), 2);
+               assert_eq!(htlc_txn.len(), 1);
                check_spends!(htlc_txn[0], bob_state_y);
-               // Alice doesn't clean up the old HTLC claim since it hasn't seen a conflicting spend for
-               // it. However, she should, because it now has an invalid parent.
-               check_spends!(htlc_txn[1], alice_state);
        }
 }
 
@@ -8883,7 +8888,12 @@ fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain
                        assert_eq!(bob_txn.len(), 1);
                        check_spends!(bob_txn[0], txn_to_broadcast[0]);
                } else {
-                       assert_eq!(bob_txn.len(), 2);
+                       if nodes[1].connect_style.borrow().updates_best_block_first() {
+                               assert_eq!(bob_txn.len(), 3);
+                               assert_eq!(bob_txn[0].txid(), bob_txn[1].txid());
+                       } else {
+                               assert_eq!(bob_txn.len(), 2);
+                       }
                        check_spends!(bob_txn[0], chan_ab.3);
                }
        }
@@ -8899,15 +8909,16 @@ fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain
                // If Alice force-closed, Bob only broadcasts a HTLC-output-claiming transaction. Otherwise,
                // Bob force-closed and broadcasts the commitment transaction along with a
                // HTLC-output-claiming transaction.
-               let bob_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
+               let mut bob_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
                if broadcast_alice {
                        assert_eq!(bob_txn.len(), 1);
                        check_spends!(bob_txn[0], txn_to_broadcast[0]);
                        assert_eq!(bob_txn[0].input[0].witness.last().unwrap().len(), script_weight);
                } else {
-                       assert_eq!(bob_txn.len(), 2);
-                       check_spends!(bob_txn[1], txn_to_broadcast[0]);
-                       assert_eq!(bob_txn[1].input[0].witness.last().unwrap().len(), script_weight);
+                       assert_eq!(bob_txn.len(), if nodes[1].connect_style.borrow().updates_best_block_first() { 3 } else { 2 });
+                       let htlc_tx = bob_txn.pop().unwrap();
+                       check_spends!(htlc_tx, txn_to_broadcast[0]);
+                       assert_eq!(htlc_tx.input[0].witness.last().unwrap().len(), script_weight);
                }
        }
 }
@@ -9383,8 +9394,12 @@ fn do_test_tx_confirmed_skipping_blocks_immediate_broadcast(test_height_before_t
                // We should broadcast an HTLC transaction spending our funding transaction first
                let spending_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
                assert_eq!(spending_txn.len(), 2);
-               assert_eq!(spending_txn[0].txid(), node_txn[0].txid());
-               check_spends!(spending_txn[1], node_txn[0]);
+               let htlc_tx = if spending_txn[0].txid() == node_txn[0].txid() {
+                       &spending_txn[1]
+               } else {
+                       &spending_txn[0]
+               };
+               check_spends!(htlc_tx, node_txn[0]);
                // We should also generate a SpendableOutputs event with the to_self output (as its
                // timelock is up).
                let descriptor_spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
@@ -9394,7 +9409,7 @@ fn do_test_tx_confirmed_skipping_blocks_immediate_broadcast(test_height_before_t
                // should immediately fail-backwards the HTLC to the previous hop, without waiting for an
                // additional block built on top of the current chain.
                nodes[1].chain_monitor.chain_monitor.transactions_confirmed(
-                       &nodes[1].get_block_header(conf_height + 1), &[(0, &spending_txn[1])], conf_height + 1);
+                       &nodes[1].get_block_header(conf_height + 1), &[(0, htlc_tx)], conf_height + 1);
                expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: channel_id }]);
                check_added_monitors!(nodes[1], 1);
 
index 80119b55787c7e75b67d791415e55dba9fb7a18d..74740a6f2279d6549ba57b443b473410e26fbef6 100644 (file)
@@ -737,7 +737,7 @@ fn do_test_balances_on_local_commitment_htlcs(anchors: bool) {
                commitment_tx
        };
        let commitment_tx_conf_height_a = block_from_scid(&mine_transaction(&nodes[0], &commitment_tx));
-       if anchors && nodes[0].connect_style.borrow().updates_best_block_first() {
+       if nodes[0].connect_style.borrow().updates_best_block_first() {
                let mut txn = nodes[0].tx_broadcaster.txn_broadcast();
                assert_eq!(txn.len(), 1);
                assert_eq!(txn[0].txid(), commitment_tx.txid());
@@ -1998,6 +1998,11 @@ fn do_test_restored_packages_retry(check_old_monitor_retries_after_upgrade: bool
        };
 
        mine_transaction(&nodes[0], &commitment_tx);
+       if nodes[0].connect_style.borrow().updates_best_block_first() {
+               let txn = nodes[0].tx_broadcaster.txn_broadcast();
+               assert_eq!(txn.len(), 1);
+               assert_eq!(txn[0].txid(), commitment_tx.txid());
+       }
 
        // Connect blocks until the HTLC's expiration is met, expecting a transaction broadcast.
        connect_blocks(&nodes[0], TEST_FINAL_CLTV);
@@ -2401,26 +2406,12 @@ fn test_anchors_aggregated_revoked_htlc_tx() {
        nodes[1].node.timer_tick_occurred();
        check_added_monitors(&nodes[1], 2);
        check_closed_event!(&nodes[1], 2, ClosureReason::OutdatedChannelManager, [nodes[0].node.get_our_node_id(); 2], 1000000);
-       let (revoked_commitment_a, revoked_commitment_b) = {
-               let txn = nodes[1].tx_broadcaster.unique_txn_broadcast();
-               assert_eq!(txn.len(), 2);
-               assert_eq!(txn[0].output.len(), 6); // 2 HTLC outputs + 1 to_self output + 1 to_remote output + 2 anchor outputs
-               assert_eq!(txn[1].output.len(), 6); // 2 HTLC outputs + 1 to_self output + 1 to_remote output + 2 anchor outputs
-               if txn[0].input[0].previous_output.txid == chan_a.3.txid() {
-                       check_spends!(&txn[0], &chan_a.3);
-                       check_spends!(&txn[1], &chan_b.3);
-                       (txn[0].clone(), txn[1].clone())
-               } else {
-                       check_spends!(&txn[1], &chan_a.3);
-                       check_spends!(&txn[0], &chan_b.3);
-                       (txn[1].clone(), txn[0].clone())
-               }
-       };
 
        // Bob should now receive two events to bump his revoked commitment transaction fees.
        assert!(nodes[0].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty());
        let events = nodes[1].chain_monitor.chain_monitor.get_and_clear_pending_events();
        assert_eq!(events.len(), 2);
+       let mut revoked_commitment_txs = Vec::with_capacity(events.len());
        let mut anchor_txs = Vec::with_capacity(events.len());
        for (idx, event) in events.into_iter().enumerate() {
                let utxo_value = Amount::ONE_BTC.to_sat() * (idx + 1) as u64;
@@ -2440,13 +2431,21 @@ fn test_anchors_aggregated_revoked_htlc_tx() {
                };
                let txn = nodes[1].tx_broadcaster.txn_broadcast();
                assert_eq!(txn.len(), 2);
+               assert_eq!(txn[0].output.len(), 6); // 2 HTLC outputs + 1 to_self output + 1 to_remote output + 2 anchor outputs
+               if txn[0].input[0].previous_output.txid == chan_a.3.txid() {
+                       check_spends!(&txn[0], &chan_a.3);
+               } else {
+                       check_spends!(&txn[0], &chan_b.3);
+               }
                let (commitment_tx, anchor_tx) = (&txn[0], &txn[1]);
                check_spends!(anchor_tx, coinbase_tx, commitment_tx);
+
+               revoked_commitment_txs.push(commitment_tx.clone());
                anchor_txs.push(anchor_tx.clone());
        };
 
        for node in &nodes {
-               mine_transactions(node, &[&revoked_commitment_a, &anchor_txs[0], &revoked_commitment_b, &anchor_txs[1]]);
+               mine_transactions(node, &[&revoked_commitment_txs[0], &anchor_txs[0], &revoked_commitment_txs[1], &anchor_txs[1]]);
        }
        check_added_monitors!(&nodes[0], 2);
        check_closed_broadcast(&nodes[0], 2, true);
@@ -2458,7 +2457,7 @@ fn test_anchors_aggregated_revoked_htlc_tx() {
                let txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
                assert_eq!(txn.len(), 4);
 
-               let (revoked_htlc_claim_a, revoked_htlc_claim_b) = if txn[0].input[0].previous_output.txid == revoked_commitment_a.txid() {
+               let (revoked_htlc_claim_a, revoked_htlc_claim_b) = if txn[0].input[0].previous_output.txid == revoked_commitment_txs[0].txid() {
                        (if txn[0].input.len() == 2 { &txn[0] } else { &txn[1] }, if txn[2].input.len() == 2 { &txn[2] } else { &txn[3] })
                } else {
                        (if txn[2].input.len() == 2 { &txn[2] } else { &txn[3] }, if txn[0].input.len() == 2 { &txn[0] } else { &txn[1] })
@@ -2466,10 +2465,10 @@ fn test_anchors_aggregated_revoked_htlc_tx() {
 
                assert_eq!(revoked_htlc_claim_a.input.len(), 2); // Spends both HTLC outputs
                assert_eq!(revoked_htlc_claim_a.output.len(), 1);
-               check_spends!(revoked_htlc_claim_a, revoked_commitment_a);
+               check_spends!(revoked_htlc_claim_a, revoked_commitment_txs[0]);
                assert_eq!(revoked_htlc_claim_b.input.len(), 2); // Spends both HTLC outputs
                assert_eq!(revoked_htlc_claim_b.output.len(), 1);
-               check_spends!(revoked_htlc_claim_b, revoked_commitment_b);
+               check_spends!(revoked_htlc_claim_b, revoked_commitment_txs[1]);
        }
 
        // Since Bob was able to confirm his revoked commitment, he'll now try to claim the HTLCs
@@ -2549,7 +2548,7 @@ fn test_anchors_aggregated_revoked_htlc_tx() {
                        sig
                };
                htlc_tx.input[0].witness = Witness::from_slice(&[fee_utxo_sig, public_key.to_bytes()]);
-               check_spends!(htlc_tx, coinbase_tx, revoked_commitment_a, revoked_commitment_b);
+               check_spends!(htlc_tx, coinbase_tx, revoked_commitment_txs[0], revoked_commitment_txs[1]);
                htlc_tx
        };
 
@@ -2608,7 +2607,7 @@ fn test_anchors_aggregated_revoked_htlc_tx() {
                        ).unwrap();
 
                        if let SpendableOutputDescriptor::StaticPaymentOutput(_) = &outputs[0] {
-                               check_spends!(spend_tx, &revoked_commitment_a, &revoked_commitment_b);
+                               check_spends!(spend_tx, &revoked_commitment_txs[0], &revoked_commitment_txs[1]);
                        } else {
                                check_spends!(spend_tx, revoked_claim_transactions.get(&spend_tx.input[0].previous_output.txid).unwrap());
                        }
@@ -2778,7 +2777,7 @@ fn do_test_monitor_claims_with_random_signatures(anchors: bool, confirm_counterp
 
        // If we update the best block to the new height before providing the confirmed transactions,
        // we'll see another broadcast of the commitment transaction.
-       if anchors && !confirm_counterparty_commitment && nodes[0].connect_style.borrow().updates_best_block_first() {
+       if !confirm_counterparty_commitment && nodes[0].connect_style.borrow().updates_best_block_first() {
                let _ = nodes[0].tx_broadcaster.txn_broadcast();
        }
 
@@ -2796,11 +2795,7 @@ fn do_test_monitor_claims_with_random_signatures(anchors: bool, confirm_counterp
        let htlc_timeout_tx = {
                let mut txn = nodes[0].tx_broadcaster.txn_broadcast();
                assert_eq!(txn.len(), 1);
-               let tx = if txn[0].input[0].previous_output.txid == commitment_tx.txid() {
-                       txn[0].clone()
-               } else {
-                       txn[1].clone()
-               };
+               let tx = txn.pop().unwrap();
                check_spends!(tx, commitment_tx, coinbase_tx);
                tx
        };
index 8767e2500bdaa4769cc7b5db18c07ebf258db8f7..5966dce91039db227155264deb0557a2ed2012ff 100644 (file)
@@ -24,6 +24,7 @@ use crate::prelude::*;
 use core::ops::Deref;
 
 /// Invalid inbound onion payment.
+#[derive(Debug)]
 pub struct InboundOnionErr {
        /// BOLT 4 error code.
        pub err_code: u16,
@@ -487,7 +488,7 @@ pub(super) fn check_incoming_htlc_cltv(
 mod tests {
        use bitcoin::hashes::Hash;
        use bitcoin::hashes::sha256::Hash as Sha256;
-       use bitcoin::secp256k1::{PublicKey, SecretKey};
+       use bitcoin::secp256k1::{PublicKey, Secp256k1, SecretKey};
        use crate::ln::{PaymentPreimage, PaymentHash, PaymentSecret};
        use crate::ln::ChannelId;
        use crate::ln::channelmanager::RecipientOnionFields;
@@ -497,6 +498,38 @@ mod tests {
        use crate::routing::router::{Path, RouteHop};
        use crate::util::test_utils;
 
+       #[test]
+       fn fail_construct_onion_on_too_big_payloads() {
+               // Ensure that if we call `construct_onion_packet` and friends where payloads are too large for
+               // the allotted packet length, we'll fail to construct. Previously, senders would happily
+               // construct invalid packets by array-shifting the final node's HMAC out of the packet when
+               // adding an intermediate onion layer, causing the receiver to error with "final payload
+               // provided for us as an intermediate node."
+               let secp_ctx = Secp256k1::new();
+               let bob = crate::sign::KeysManager::new(&[2; 32], 42, 42);
+               let bob_pk = PublicKey::from_secret_key(&secp_ctx, &bob.get_node_secret_key());
+               let charlie = crate::sign::KeysManager::new(&[3; 32], 42, 42);
+               let charlie_pk = PublicKey::from_secret_key(&secp_ctx, &charlie.get_node_secret_key());
+
+               let (
+                       session_priv, total_amt_msat, cur_height, mut recipient_onion, keysend_preimage, payment_hash,
+                       prng_seed, hops, ..
+               ) = payment_onion_args(bob_pk, charlie_pk);
+
+               // Ensure the onion will not fit all the payloads by adding a large custom TLV.
+               recipient_onion.custom_tlvs.push((13377331, vec![0; 1156]));
+
+               let path = Path { hops, blinded_tail: None, };
+               let onion_keys = super::onion_utils::construct_onion_keys(&secp_ctx, &path, &session_priv).unwrap();
+               let (onion_payloads, ..) = super::onion_utils::build_onion_payloads(
+                       &path, total_amt_msat, recipient_onion, cur_height + 1, &Some(keysend_preimage)
+               ).unwrap();
+
+               assert!(super::onion_utils::construct_onion_packet(
+                               onion_payloads, onion_keys, prng_seed, &payment_hash
+               ).is_err());
+       }
+
        #[test]
        fn test_peel_payment_onion() {
                use super::*;
index b7da2fd882d516d559e5f6e3e8a26ffe379eeddb..99e3e965a9a9aab167928198db79b82054c98066 100644 (file)
@@ -323,8 +323,6 @@ fn construct_onion_packet_with_init_noise<HD: Writeable, P: Packet>(
 
                let mut pos = 0;
                for (i, (payload, keys)) in payloads.iter().zip(onion_keys.iter()).enumerate() {
-                       if i == payloads.len() - 1 { break; }
-
                        let mut chacha = ChaCha20::new(&keys.rho, &[0u8; 8]);
                        for _ in 0..(packet_data.len() - pos) { // TODO: Batch this.
                                let mut dummy = [0; 1];
@@ -338,6 +336,8 @@ fn construct_onion_packet_with_init_noise<HD: Writeable, P: Packet>(
                                return Err(());
                        }
 
+                       if i == payloads.len() - 1 { break; }
+
                        res.resize(pos, 0u8);
                        chacha.process_in_place(&mut res);
                }
@@ -1023,18 +1023,20 @@ fn decode_next_hop<T, R: ReadableArgs<T>, N: NextPacketBytes>(shared_secret: [u8
                        if hmac == [0; 32] {
                                #[cfg(test)]
                                {
-                                       // In tests, make sure that the initial onion packet data is, at least, non-0.
-                                       // We could do some fancy randomness test here, but, ehh, whatever.
-                                       // This checks for the issue where you can calculate the path length given the
-                                       // onion data as all the path entries that the originator sent will be here
-                                       // as-is (and were originally 0s).
-                                       // Of course reverse path calculation is still pretty easy given naive routing
-                                       // algorithms, but this fixes the most-obvious case.
-                                       let mut next_bytes = [0; 32];
-                                       chacha_stream.read_exact(&mut next_bytes).unwrap();
-                                       assert_ne!(next_bytes[..], [0; 32][..]);
-                                       chacha_stream.read_exact(&mut next_bytes).unwrap();
-                                       assert_ne!(next_bytes[..], [0; 32][..]);
+                                       if chacha_stream.read.position() < hop_data.len() as u64 - 64 {
+                                               // In tests, make sure that the initial onion packet data is, at least, non-0.
+                                               // We could do some fancy randomness test here, but, ehh, whatever.
+                                               // This checks for the issue where you can calculate the path length given the
+                                               // onion data as all the path entries that the originator sent will be here
+                                               // as-is (and were originally 0s).
+                                               // Of course reverse path calculation is still pretty easy given naive routing
+                                               // algorithms, but this fixes the most-obvious case.
+                                               let mut next_bytes = [0; 32];
+                                               chacha_stream.read_exact(&mut next_bytes).unwrap();
+                                               assert_ne!(next_bytes[..], [0; 32][..]);
+                                               chacha_stream.read_exact(&mut next_bytes).unwrap();
+                                               assert_ne!(next_bytes[..], [0; 32][..]);
+                                       }
                                }
                                return Ok((msg, None)); // We are the final destination for this packet
                        } else {
index a5654b30f65db4a3d2d89d9b7d37a8420d6f5040..6af0e63c98bf421bade346f89f1fff303f48e764 100644 (file)
@@ -19,8 +19,9 @@ use crate::events::{ClosureReason, Event, HTLCDestination, MessageSendEvent, Mes
 use crate::ln::channel::{EXPIRE_PREV_CONFIG_TICKS, commit_tx_fee_msat, get_holder_selected_channel_reserve_satoshis, ANCHOR_OUTPUT_VALUE_SATOSHI};
 use crate::ln::channelmanager::{BREAKDOWN_TIMEOUT, MPP_TIMEOUT_TICKS, MIN_CLTV_EXPIRY_DELTA, PaymentId, PaymentSendFailure, RecentPaymentDetails, RecipientOnionFields, HTLCForwardInfo, PendingHTLCRouting, PendingAddHTLCInfo};
 use crate::ln::features::{Bolt11InvoiceFeatures, ChannelTypeFeatures};
-use crate::ln::{msgs, ChannelId, PaymentSecret, PaymentPreimage};
+use crate::ln::{msgs, ChannelId, PaymentHash, PaymentSecret, PaymentPreimage};
 use crate::ln::msgs::ChannelMessageHandler;
+use crate::ln::onion_utils;
 use crate::ln::outbound_payment::{IDEMPOTENCY_TIMEOUT_TICKS, Retry};
 use crate::routing::gossip::{EffectiveCapacity, RoutingFees};
 use crate::routing::router::{get_route, Path, PaymentParameters, Route, Router, RouteHint, RouteHintHop, RouteHop, RouteParameters, find_route};
@@ -31,10 +32,14 @@ use crate::util::errors::APIError;
 use crate::util::ser::Writeable;
 use crate::util::string::UntrustedString;
 
+use bitcoin::hashes::Hash;
+use bitcoin::hashes::sha256::Hash as Sha256;
 use bitcoin::network::constants::Network;
+use bitcoin::secp256k1::{Secp256k1, SecretKey};
 
 use crate::prelude::*;
 
+use crate::ln::functional_test_utils;
 use crate::ln::functional_test_utils::*;
 use crate::routing::gossip::NodeId;
 #[cfg(feature = "std")]
@@ -633,7 +638,7 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) {
        let nodes_0_deserialized;
        let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
 
-       let chan_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
+       let (_, _, chan_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 1);
        let (_, _, chan_id_2, _) = create_announced_chan_between_nodes(&nodes, 1, 2);
 
        // Serialize the ChannelManager prior to sending payments
@@ -745,14 +750,21 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) {
        assert_eq!(nodes[0].node.list_usable_channels().len(), 1);
 
        mine_transaction(&nodes[1], &as_commitment_tx);
-       let bs_htlc_claim_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
-       assert_eq!(bs_htlc_claim_txn.len(), 1);
-       check_spends!(bs_htlc_claim_txn[0], as_commitment_tx);
+       let bs_htlc_claim_txn = {
+               let mut txn = nodes[1].tx_broadcaster.unique_txn_broadcast();
+               assert_eq!(txn.len(), 2);
+               check_spends!(txn[0], funding_tx);
+               check_spends!(txn[1], as_commitment_tx);
+               txn.pop().unwrap()
+       };
 
        if !confirm_before_reload {
                mine_transaction(&nodes[0], &as_commitment_tx);
+               let txn = nodes[0].tx_broadcaster.unique_txn_broadcast();
+               assert_eq!(txn.len(), 1);
+               assert_eq!(txn[0].txid(), as_commitment_tx.txid());
        }
-       mine_transaction(&nodes[0], &bs_htlc_claim_txn[0]);
+       mine_transaction(&nodes[0], &bs_htlc_claim_txn);
        expect_payment_sent(&nodes[0], payment_preimage_1, None, true, false);
        connect_blocks(&nodes[0], TEST_FINAL_CLTV*4 + 20);
        let (first_htlc_timeout_tx, second_htlc_timeout_tx) = {
@@ -762,7 +774,7 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) {
        };
        check_spends!(first_htlc_timeout_tx, as_commitment_tx);
        check_spends!(second_htlc_timeout_tx, as_commitment_tx);
-       if first_htlc_timeout_tx.input[0].previous_output == bs_htlc_claim_txn[0].input[0].previous_output {
+       if first_htlc_timeout_tx.input[0].previous_output == bs_htlc_claim_txn.input[0].previous_output {
                confirm_transaction(&nodes[0], &second_htlc_timeout_tx);
        } else {
                confirm_transaction(&nodes[0], &first_htlc_timeout_tx);
@@ -914,19 +926,23 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) {
        // the HTLC-Timeout transaction beyond 1 conf). For dust HTLCs, the HTLC is considered resolved
        // after the commitment transaction, so always connect the commitment transaction.
        mine_transaction(&nodes[0], &bs_commitment_tx[0]);
+       if nodes[0].connect_style.borrow().updates_best_block_first() {
+               let _ = nodes[0].tx_broadcaster.txn_broadcast();
+       }
        mine_transaction(&nodes[1], &bs_commitment_tx[0]);
        if !use_dust {
                connect_blocks(&nodes[0], TEST_FINAL_CLTV + (MIN_CLTV_EXPIRY_DELTA as u32));
                connect_blocks(&nodes[1], TEST_FINAL_CLTV + (MIN_CLTV_EXPIRY_DELTA as u32));
                let as_htlc_timeout = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
-               check_spends!(as_htlc_timeout[0], bs_commitment_tx[0]);
                assert_eq!(as_htlc_timeout.len(), 1);
+               check_spends!(as_htlc_timeout[0], bs_commitment_tx[0]);
 
                mine_transaction(&nodes[0], &as_htlc_timeout[0]);
-               // nodes[0] may rebroadcast (or RBF-bump) its HTLC-Timeout, so wipe the announced set.
-               nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clear();
                mine_transaction(&nodes[1], &as_htlc_timeout[0]);
        }
+       if nodes[0].connect_style.borrow().updates_best_block_first() {
+               let _ = nodes[0].tx_broadcaster.txn_broadcast();
+       }
 
        // Create a new channel on which to retry the payment before we fail the payment via the
        // HTLC-Timeout transaction. This avoids ChannelManager timing out the payment due to us
@@ -1044,32 +1060,36 @@ fn do_test_dup_htlc_onchain_fails_on_reload(persist_manager_post_event: bool, co
 
        // Connect blocks until the CLTV timeout is up so that we get an HTLC-Timeout transaction
        connect_blocks(&nodes[0], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1);
-       let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
-       assert_eq!(node_txn.len(), 3);
-       assert_eq!(node_txn[0].txid(), node_txn[1].txid());
-       check_spends!(node_txn[1], funding_tx);
-       check_spends!(node_txn[2], node_txn[1]);
-       let timeout_txn = vec![node_txn[2].clone()];
+       let (commitment_tx, htlc_timeout_tx) = {
+               let mut txn = nodes[0].tx_broadcaster.unique_txn_broadcast();
+               assert_eq!(txn.len(), 2);
+               check_spends!(txn[0], funding_tx);
+               check_spends!(txn[1], txn[0]);
+               (txn.remove(0), txn.remove(0))
+       };
 
        nodes[1].node.claim_funds(payment_preimage);
        check_added_monitors!(nodes[1], 1);
        expect_payment_claimed!(nodes[1], payment_hash, 10_000_000);
 
-       connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, vec![node_txn[1].clone()]));
+       mine_transaction(&nodes[1], &commitment_tx);
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
        check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
-       let claim_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
-       assert_eq!(claim_txn.len(), 1);
-       check_spends!(claim_txn[0], node_txn[1]);
+       let htlc_success_tx = {
+               let mut txn = nodes[1].tx_broadcaster.txn_broadcast();
+               assert_eq!(txn.len(), 1);
+               check_spends!(txn[0], commitment_tx);
+               txn.pop().unwrap()
+       };
 
-       connect_block(&nodes[0], &create_dummy_block(nodes[0].best_block_hash(), 42, vec![node_txn[1].clone()]));
+       mine_transaction(&nodes[0], &commitment_tx);
 
        if confirm_commitment_tx {
                connect_blocks(&nodes[0], BREAKDOWN_TIMEOUT as u32 - 1);
        }
 
-       let claim_block = create_dummy_block(nodes[0].best_block_hash(), 42, if payment_timeout { timeout_txn } else { vec![claim_txn[0].clone()] });
+       let claim_block = create_dummy_block(nodes[0].best_block_hash(), 42, if payment_timeout { vec![htlc_timeout_tx] } else { vec![htlc_success_tx] });
 
        if payment_timeout {
                assert!(confirm_commitment_tx); // Otherwise we're spending below our CSV!
@@ -3334,6 +3354,7 @@ fn test_threaded_payment_retries() {
                // We really want std::thread::scope, but its not stable until 1.63. Until then, we get unsafe.
                let node_ref = NodePtr::from_node(&nodes[0]);
                move || {
+                       let _ = &node_ref;
                        let node_a = unsafe { &*node_ref.0 };
                        while Instant::now() < end_time {
                                node_a.node.get_and_clear_pending_events(); // wipe the PendingHTLCsForwardable
@@ -4194,3 +4215,59 @@ fn  test_htlc_forward_considers_anchor_outputs_value() {
        check_closed_broadcast(&nodes[2], 1, true);
        check_added_monitors(&nodes[2], 1);
 }
+
+#[test]
+fn peel_payment_onion_custom_tlvs() {
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+       let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+       let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+       create_announced_chan_between_nodes(&nodes, 0, 1);
+       let secp_ctx = Secp256k1::new();
+
+       let amt_msat = 1000;
+       let payment_params = PaymentParameters::for_keysend(nodes[1].node.get_our_node_id(),
+               TEST_FINAL_CLTV, false);
+       let route_params = RouteParameters::from_payment_params_and_value(payment_params, amt_msat);
+       let route = functional_test_utils::get_route(&nodes[0], &route_params).unwrap();
+       let mut recipient_onion = RecipientOnionFields::spontaneous_empty()
+               .with_custom_tlvs(vec![(414141, vec![42; 1200])]).unwrap();
+       let prng_seed = chanmon_cfgs[0].keys_manager.get_secure_random_bytes();
+       let session_priv = SecretKey::from_slice(&prng_seed[..]).expect("RNG is busted");
+       let keysend_preimage = PaymentPreimage([42; 32]);
+       let payment_hash = PaymentHash(Sha256::hash(&keysend_preimage.0).to_byte_array());
+
+       let (onion_routing_packet, first_hop_msat, cltv_expiry) = onion_utils::create_payment_onion(
+               &secp_ctx, &route.paths[0], &session_priv, amt_msat, recipient_onion.clone(),
+               nodes[0].best_block_info().1, &payment_hash, &Some(keysend_preimage), prng_seed
+       ).unwrap();
+
+       let update_add = msgs::UpdateAddHTLC {
+               channel_id: ChannelId([0; 32]),
+               htlc_id: 42,
+               amount_msat: first_hop_msat,
+               payment_hash,
+               cltv_expiry,
+               skimmed_fee_msat: None,
+               onion_routing_packet,
+               blinding_point: None,
+       };
+       let peeled_onion = crate::ln::onion_payment::peel_payment_onion(
+               &update_add, &&chanmon_cfgs[1].keys_manager, &&chanmon_cfgs[1].logger, &secp_ctx,
+               nodes[1].best_block_info().1, true, false
+       ).unwrap();
+       assert_eq!(peeled_onion.incoming_amt_msat, Some(amt_msat));
+       match peeled_onion.routing {
+               PendingHTLCRouting::ReceiveKeysend {
+                       payment_data, payment_metadata, custom_tlvs, ..
+               } => {
+                       #[cfg(not(c_bindings))]
+                       assert_eq!(&custom_tlvs, recipient_onion.custom_tlvs());
+                       #[cfg(c_bindings)]
+                       assert_eq!(custom_tlvs, recipient_onion.custom_tlvs());
+                       assert!(payment_metadata.is_none());
+                       assert!(payment_data.is_none());
+               },
+               _ => panic!()
+       }
+}
index 6a935acf00d7b6cbb1cebbf4e633db05d65db3b0..a4e0042414f8996f95c08deb72d7a578322fff97 100644 (file)
@@ -2488,7 +2488,6 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
        // broadcast_node_announcement panics) of the maximum-length addresses would fit in a 64KB
        // message...
        const HALF_MESSAGE_IS_ADDRS: u32 = ::core::u16::MAX as u32 / (SocketAddress::MAX_LEN as u32 + 1) / 2;
-       #[deny(const_err)]
        #[allow(dead_code)]
        // ...by failing to compile if the number of addresses that would be half of a message is
        // smaller than 100:
index 223aa5dbac30ba64604992d6b272722ceb3ed1c0..b3d52b78f2b5a5a3921c66873325fde418601b2b 100644 (file)
@@ -1065,9 +1065,10 @@ fn do_forwarded_payment_no_manager_persistence(use_cs_commitment: bool, claim_ht
                        confirm_transaction(&nodes[1], &cs_commitment_tx[1]);
                } else {
                        connect_blocks(&nodes[1], htlc_expiry - nodes[1].best_block_info().1 + 1);
-                       let bs_htlc_timeout_tx = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
-                       assert_eq!(bs_htlc_timeout_tx.len(), 1);
-                       confirm_transaction(&nodes[1], &bs_htlc_timeout_tx[0]);
+                       let mut txn = nodes[1].tx_broadcaster.txn_broadcast();
+                       assert_eq!(txn.len(), if nodes[1].connect_style.borrow().updates_best_block_first() { 2 } else { 1 });
+                       let bs_htlc_timeout_tx = txn.pop().unwrap();
+                       confirm_transaction(&nodes[1], &bs_htlc_timeout_tx);
                }
        } else {
                confirm_transaction(&nodes[1], &bs_commitment_tx[0]);
index badb78f245a6651be0abffc415565142e4c7bcf5..cce012aa99203edf4f5b327f22d9cc754226a62e 100644 (file)
@@ -666,6 +666,9 @@ fn test_htlc_preimage_claim_holder_commitment_after_counterparty_commitment_reor
 
        mine_transaction(&nodes[0], &commitment_tx_b);
        mine_transaction(&nodes[1], &commitment_tx_b);
+       if nodes[1].connect_style.borrow().updates_best_block_first() {
+               let _ = nodes[1].tx_broadcaster.txn_broadcast();
+       }
 
        // Provide the preimage now, such that we only claim from the holder commitment (since it's
        // currently confirmed) and not the counterparty's.
@@ -756,3 +759,122 @@ fn test_htlc_preimage_claim_prev_counterparty_commitment_after_current_counterpa
        // commitment (still unrevoked) is the currently confirmed closing transaction.
        assert_eq!(htlc_preimage_tx.input[0].witness.second_to_last().unwrap(), &payment_preimage.0[..]);
 }
+
+fn do_test_retries_own_commitment_broadcast_after_reorg(anchors: bool, revoked_counterparty_commitment: bool) {
+       // Tests that a node will retry broadcasting its own commitment after seeing a confirmed
+       // counterparty commitment be reorged out.
+       let mut chanmon_cfgs = create_chanmon_cfgs(2);
+       if revoked_counterparty_commitment {
+               chanmon_cfgs[1].keys_manager.disable_revocation_policy_check = true;
+       }
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+       let mut config = test_default_channel_config();
+       if anchors {
+               config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true;
+               config.manually_accept_inbound_channels = true;
+       }
+       let persister;
+       let new_chain_monitor;
+       let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config), Some(config)]);
+       let nodes_1_deserialized;
+       let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+       let (_, _, chan_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 1);
+
+       // Route a payment so we have an HTLC to claim as well.
+       let _ = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
+
+       if revoked_counterparty_commitment {
+               // Trigger a fee update such that we advance the state. We will have B broadcast its state
+               // without the fee update.
+               let serialized_node = nodes[1].node.encode();
+               let serialized_monitor = get_monitor!(nodes[1], chan_id).encode();
+
+               *chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap() += 1;
+               nodes[0].node.timer_tick_occurred();
+               check_added_monitors!(nodes[0], 1);
+
+               let fee_update = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+               nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), &fee_update.update_fee.unwrap());
+               commitment_signed_dance!(nodes[1], nodes[0], fee_update.commitment_signed, false);
+
+               reload_node!(
+                       nodes[1], config, &serialized_node, &[&serialized_monitor], persister, new_chain_monitor, nodes_1_deserialized
+               );
+       }
+
+       // Connect blocks until the HTLC expiry is met, prompting a commitment broadcast by A.
+       connect_blocks(&nodes[0], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1);
+       check_closed_broadcast(&nodes[0], 1, true);
+       check_added_monitors(&nodes[0], 1);
+       check_closed_event(&nodes[0], 1, ClosureReason::HolderForceClosed, false, &[nodes[1].node.get_our_node_id()], 100_000);
+
+       {
+               let mut txn = nodes[0].tx_broadcaster.txn_broadcast();
+               if anchors {
+                       assert_eq!(txn.len(), 1);
+                       let commitment_tx_a = txn.pop().unwrap();
+                       check_spends!(commitment_tx_a, funding_tx);
+               } else {
+                       assert_eq!(txn.len(), 2);
+                       let htlc_tx_a = txn.pop().unwrap();
+                       let commitment_tx_a = txn.pop().unwrap();
+                       check_spends!(commitment_tx_a, funding_tx);
+                       check_spends!(htlc_tx_a, commitment_tx_a);
+               }
+       };
+
+       // B will also broadcast its own commitment.
+       nodes[1].node.force_close_broadcasting_latest_txn(&chan_id, &nodes[0].node.get_our_node_id()).unwrap();
+       check_closed_broadcast(&nodes[1], 1, true);
+       check_added_monitors(&nodes[1], 1);
+       check_closed_event(&nodes[1], 1, ClosureReason::HolderForceClosed, false, &[nodes[0].node.get_our_node_id()], 100_000);
+
+       let commitment_b = {
+               let mut txn = nodes[1].tx_broadcaster.txn_broadcast();
+               assert_eq!(txn.len(), 1);
+               let tx = txn.pop().unwrap();
+               check_spends!(tx, funding_tx);
+               tx
+       };
+
+       // Confirm B's commitment, A should now broadcast an HTLC timeout for commitment B.
+       mine_transaction(&nodes[0], &commitment_b);
+       {
+               let mut txn = nodes[0].tx_broadcaster.txn_broadcast();
+               if nodes[0].connect_style.borrow().updates_best_block_first() {
+                       // `commitment_a` and `htlc_timeout_a` are rebroadcast because the best block was
+                       // updated prior to seeing `commitment_b`.
+                       assert_eq!(txn.len(), if anchors { 2 } else { 3 });
+                       check_spends!(txn.last().unwrap(), commitment_b);
+               } else {
+                       assert_eq!(txn.len(), 1);
+                       check_spends!(txn[0], commitment_b);
+               }
+       }
+
+       // Disconnect the block, allowing A to retry its own commitment. Note that we connect two
+       // blocks, one to get us back to the original height, and another to retry our pending claims.
+       disconnect_blocks(&nodes[0], 1);
+       connect_blocks(&nodes[0], 2);
+       {
+               let mut txn = nodes[0].tx_broadcaster.unique_txn_broadcast();
+               if anchors {
+                       assert_eq!(txn.len(), 1);
+                       check_spends!(txn[0], funding_tx);
+               } else {
+                       assert_eq!(txn.len(), 2);
+                       check_spends!(txn[0], txn[1]); // HTLC timeout A
+                       check_spends!(txn[1], funding_tx); // Commitment A
+                       assert_ne!(txn[1].txid(), commitment_b.txid());
+               }
+       }
+}
+
+#[test]
+fn test_retries_own_commitment_broadcast_after_reorg() {
+       do_test_retries_own_commitment_broadcast_after_reorg(false, false);
+       do_test_retries_own_commitment_broadcast_after_reorg(false, true);
+       do_test_retries_own_commitment_broadcast_after_reorg(true, false);
+       do_test_retries_own_commitment_broadcast_after_reorg(true, true);
+}
index 21a1b302da09b76fa3f73cbc70953fa46919c3c6..8a44eb2a5beebf3128582bdbce23cc8edfbda381 100644 (file)
@@ -914,7 +914,8 @@ where
        fn peer_disconnected(&self, their_node_id: &PublicKey) {
                match self.message_recipients.lock().unwrap().remove(their_node_id) {
                        Some(OnionMessageRecipient::ConnectedPeer(..)) => {},
-                       _ => debug_assert!(false),
+                       Some(_) => debug_assert!(false),
+                       None => {},
                }
        }
 
index edb372616972770fc1481cd576d99718c32002b0..9b4e41ae174d7b8a73cb8f64b7918f62a0dbf01b 100644 (file)
@@ -990,54 +990,38 @@ impl Readable for ChannelInfo {
 pub struct DirectedChannelInfo<'a> {
        channel: &'a ChannelInfo,
        direction: &'a ChannelUpdateInfo,
-       htlc_maximum_msat: u64,
-       effective_capacity: EffectiveCapacity,
-       /// Outbound from the perspective of `node_one`.
-       ///
-       /// If true, the channel is considered to be outbound from `node_one` perspective.
-       /// If false, the channel is considered to be outbound from `node_two` perspective.
-       ///
-       /// [`ChannelInfo::node_one`]
-       /// [`ChannelInfo::node_two`]
-       outbound: bool,
+       /// The direction this channel is in - if set, it indicates that we're traversing the channel
+       /// from [`ChannelInfo::node_one`] to [`ChannelInfo::node_two`].
+       from_node_one: bool,
 }
 
 impl<'a> DirectedChannelInfo<'a> {
        #[inline]
-       fn new(channel: &'a ChannelInfo, direction: &'a ChannelUpdateInfo, outbound: bool) -> Self {
-               let mut htlc_maximum_msat = direction.htlc_maximum_msat;
-               let capacity_msat = channel.capacity_sats.map(|capacity_sats| capacity_sats * 1000);
-
-               let effective_capacity = match capacity_msat {
-                       Some(capacity_msat) => {
-                               htlc_maximum_msat = cmp::min(htlc_maximum_msat, capacity_msat);
-                               EffectiveCapacity::Total { capacity_msat, htlc_maximum_msat }
-                       },
-                       None => EffectiveCapacity::AdvertisedMaxHTLC { amount_msat: htlc_maximum_msat },
-               };
-
-               Self {
-                       channel, direction, htlc_maximum_msat, effective_capacity, outbound
-               }
+       fn new(channel: &'a ChannelInfo, direction: &'a ChannelUpdateInfo, from_node_one: bool) -> Self {
+               Self { channel, direction, from_node_one }
        }
 
        /// Returns information for the channel.
        #[inline]
        pub fn channel(&self) -> &'a ChannelInfo { self.channel }
 
-       /// Returns the maximum HTLC amount allowed over the channel in the direction.
-       #[inline]
-       pub fn htlc_maximum_msat(&self) -> u64 {
-               self.htlc_maximum_msat
-       }
-
        /// Returns the [`EffectiveCapacity`] of the channel in the direction.
        ///
        /// This is either the total capacity from the funding transaction, if known, or the
        /// `htlc_maximum_msat` for the direction as advertised by the gossip network, if known,
        /// otherwise.
+       #[inline]
        pub fn effective_capacity(&self) -> EffectiveCapacity {
-               self.effective_capacity
+               let mut htlc_maximum_msat = self.direction().htlc_maximum_msat;
+               let capacity_msat = self.channel.capacity_sats.map(|capacity_sats| capacity_sats * 1000);
+
+               match capacity_msat {
+                       Some(capacity_msat) => {
+                               htlc_maximum_msat = cmp::min(htlc_maximum_msat, capacity_msat);
+                               EffectiveCapacity::Total { capacity_msat, htlc_maximum_msat }
+                       },
+                       None => EffectiveCapacity::AdvertisedMaxHTLC { amount_msat: htlc_maximum_msat },
+               }
        }
 
        /// Returns information for the direction.
@@ -1047,12 +1031,14 @@ impl<'a> DirectedChannelInfo<'a> {
        /// Returns the `node_id` of the source hop.
        ///
        /// Refers to the `node_id` forwarding the payment to the next hop.
-       pub(super) fn source(&self) -> &'a NodeId { if self.outbound { &self.channel.node_one } else { &self.channel.node_two } }
+       #[inline]
+       pub(super) fn source(&self) -> &'a NodeId { if self.from_node_one { &self.channel.node_one } else { &self.channel.node_two } }
 
        /// Returns the `node_id` of the target hop.
        ///
        /// Refers to the `node_id` receiving the payment from the previous hop.
-       pub(super) fn target(&self) -> &'a NodeId { if self.outbound { &self.channel.node_two } else { &self.channel.node_one } }
+       #[inline]
+       pub(super) fn target(&self) -> &'a NodeId { if self.from_node_one { &self.channel.node_two } else { &self.channel.node_one } }
 }
 
 impl<'a> fmt::Debug for DirectedChannelInfo<'a> {
index b4cbf3cb234c9eaae6c0757131b596f88c5e7c56..9423078749d39175790a6f298831048db5482a63 100644 (file)
@@ -938,6 +938,10 @@ impl Readable for RouteHint {
 }
 
 /// A channel descriptor for a hop along a payment path.
+///
+/// While this generally comes from BOLT 11's `r` field, this struct includes more fields than are
+/// available in BOLT 11. Thus, encoding and decoding this via `lightning-invoice` is lossy, as
+/// fields not supported in BOLT 11 will be stripped.
 #[derive(Clone, Debug, Hash, Eq, PartialEq, Ord, PartialOrd)]
 pub struct RouteHintHop {
        /// The node_id of the non-target end of the route
@@ -964,33 +968,24 @@ impl_writeable_tlv_based!(RouteHintHop, {
 });
 
 #[derive(Eq, PartialEq)]
+#[repr(align(64))] // Force the size to 64 bytes
 struct RouteGraphNode {
        node_id: NodeId,
-       lowest_fee_to_node: u64,
-       total_cltv_delta: u32,
+       score: u64,
        // The maximum value a yet-to-be-constructed payment path might flow through this node.
        // This value is upper-bounded by us by:
        // - how much is needed for a path being constructed
        // - how much value can channels following this node (up to the destination) can contribute,
        //   considering their capacity and fees
        value_contribution_msat: u64,
-       /// The effective htlc_minimum_msat at this hop. If a later hop on the path had a higher HTLC
-       /// minimum, we use it, plus the fees required at each earlier hop to meet it.
-       path_htlc_minimum_msat: u64,
-       /// All penalties incurred from this hop on the way to the destination, as calculated using
-       /// channel scoring.
-       path_penalty_msat: u64,
+       total_cltv_delta: u32,
        /// The number of hops walked up to this node.
        path_length_to_node: u8,
 }
 
 impl cmp::Ord for RouteGraphNode {
        fn cmp(&self, other: &RouteGraphNode) -> cmp::Ordering {
-               let other_score = cmp::max(other.lowest_fee_to_node, other.path_htlc_minimum_msat)
-                       .saturating_add(other.path_penalty_msat);
-               let self_score = cmp::max(self.lowest_fee_to_node, self.path_htlc_minimum_msat)
-                       .saturating_add(self.path_penalty_msat);
-               other_score.cmp(&self_score).then_with(|| other.node_id.cmp(&self.node_id))
+               other.score.cmp(&self.score).then_with(|| other.node_id.cmp(&self.node_id))
        }
 }
 
@@ -1000,6 +995,16 @@ impl cmp::PartialOrd for RouteGraphNode {
        }
 }
 
+// While RouteGraphNode can be laid out with fewer bytes, performance appears to be improved
+// substantially when it is laid out at exactly 64 bytes.
+//
+// Thus, we use `#[repr(align(64))]` on the struct to pad it out and check that it stays 64
+// bytes here.
+#[cfg(any(ldk_bench, not(any(test, fuzzing))))]
+const _GRAPH_NODE_SMALL: usize = 64 - core::mem::size_of::<RouteGraphNode>();
+#[cfg(any(ldk_bench, not(any(test, fuzzing))))]
+const _GRAPH_NODE_FIXED_SIZE: usize = core::mem::size_of::<RouteGraphNode>() - 64;
+
 /// A wrapper around the various hop representations.
 ///
 /// Can be used to examine the properties of a hop,
@@ -1009,73 +1014,95 @@ pub enum CandidateRouteHop<'a> {
        /// A hop from the payer, where the outbound liquidity is known.
        FirstHop {
                /// Channel details of the first hop
-               /// [`ChannelDetails::get_outbound_payment_scid`] is assumed
-               /// to always return `Some(scid)`
-               /// this assumption is checked in [`find_route`] method.
-               details: &'a ChannelDetails,
-               /// The node id of the payer.
                ///
-               /// Can be accessed via `source` method.
-               node_id: NodeId
+               /// [`ChannelDetails::get_outbound_payment_scid`] MUST be `Some` (indicating the channel
+               /// has been funded and is able to pay), and accessor methods may panic otherwise.
+               ///
+               /// [`find_route`] validates this prior to constructing a [`CandidateRouteHop`].
+               details: &'a ChannelDetails,
+               /// The node id of the payer, which is also the source side of this candidate route hop.
+               payer_node_id: &'a NodeId,
        },
-       /// A hop found in the [`ReadOnlyNetworkGraph`],
-       /// where the channel capacity may be unknown.
+       /// A hop found in the [`ReadOnlyNetworkGraph`].
        PublicHop {
-               /// channel info of the hop.
+               /// Information about the channel, including potentially its capacity and
+               /// direction-specific information.
                info: DirectedChannelInfo<'a>,
-               /// short_channel_id of the channel.
+               /// The short channel ID of the channel, i.e. the identifier by which we refer to this
+               /// channel.
                short_channel_id: u64,
        },
-       /// A hop to the payee found in the BOLT 11 payment invoice,
-       /// though not necessarily a direct
-       /// channel.
+       /// A private hop communicated by the payee, generally via a BOLT 11 invoice.
+       ///
+       /// Because BOLT 11 route hints can take multiple hops to get to the destination, this may not
+       /// terminate at the payee.
        PrivateHop {
-               /// Hint provides information about a private hop,
-               /// needed while routing through a private
-               /// channel.
+               /// Information about the private hop communicated via BOLT 11.
                hint: &'a RouteHintHop,
-               /// Node id of the next hop in route.
-               target_node_id: NodeId
+               /// Node id of the next hop in BOLT 11 route hint.
+               target_node_id: &'a NodeId
        },
-       /// The payee's identity is concealed behind
-       /// blinded paths provided in a BOLT 12 invoice.
+       /// A blinded path which starts with an introduction point and ultimately terminates with the
+       /// payee.
+       ///
+       /// Because we don't know the payee's identity, [`CandidateRouteHop::target`] will return
+       /// `None` in this state.
+       ///
+       /// Because blinded paths are "all or nothing", and we cannot use just one part of a blinded
+       /// path, the full path is treated as a single [`CandidateRouteHop`].
        Blinded {
-               /// Hint provides information about a blinded hop,
-               /// needed while routing through a blinded path.
-               /// `BlindedPayInfo` provides information needed about the
-               /// payment while routing through a blinded path.
-               /// `BlindedPath` is the blinded path to the destination.
+               /// Information about the blinded path including the fee, HTLC amount limits, and
+               /// cryptographic material required to build an HTLC through the given path.
                hint: &'a (BlindedPayInfo, BlindedPath),
                /// Index of the hint in the original list of blinded hints.
-               /// Provided to uniquely identify a hop as we are
-               /// route building.
+               ///
+               /// This is used to cheaply uniquely identify this blinded path, even though we don't have
+               /// a short channel ID for this hop.
                hint_idx: usize,
        },
-       /// Similar to [`Self::Blinded`], but the path here
-       /// has 1 blinded hop. `BlindedPayInfo` provided
-       /// for 1-hop blinded paths is ignored
-       /// because it is meant to apply to the hops *between* the
-       /// introduction node and the destination.
-       /// Useful for tracking that we need to include a blinded
-       /// path at the end of our [`Route`].
+       /// Similar to [`Self::Blinded`], but the path here only has one hop.
+       ///
+       /// While we treat this similarly to [`CandidateRouteHop::Blinded`] in many respects (e.g.
+       /// returning `None` from [`CandidateRouteHop::target`]), in this case we do actually know the
+       /// payee's identity - it's the introduction point!
+       ///
+       /// [`BlindedPayInfo`] provided for 1-hop blinded paths is ignored because it is meant to apply
+       /// to the hops *between* the introduction node and the destination.
+       ///
+       /// This primarily exists to track that we need to include a blinded path at the end of our
+       /// [`Route`], even though it doesn't actually add an additional hop in the payment.
        OneHopBlinded {
-               /// Hint provides information about a single blinded hop,
-               /// needed while routing through a one hop blinded path.
-               /// `BlindedPayInfo` is ignored here.
-               /// `BlindedPath` is the blinded path to the destination.
+               /// Information about the blinded path including the fee, HTLC amount limits, and
+               /// cryptographic material required to build an HTLC terminating with the given path.
+               ///
+               /// Note that the [`BlindedPayInfo`] is ignored here.
                hint: &'a (BlindedPayInfo, BlindedPath),
                /// Index of the hint in the original list of blinded hints.
-               /// Provided to uniquely identify a hop as we are route building.
+               ///
+               /// This is used to cheaply uniquely identify this blinded path, even though we don't have
+               /// a short channel ID for this hop.
                hint_idx: usize,
        },
 }
 
 impl<'a> CandidateRouteHop<'a> {
-       /// Returns short_channel_id if known.
-       /// For `FirstHop` we assume [`ChannelDetails::get_outbound_payment_scid`] is always set, this assumption is checked in
-       /// [`find_route`] method.
-       /// For `Blinded` and `OneHopBlinded` we return `None` because next hop is not known.
-       pub fn short_channel_id(&self) -> Option<u64> {
+       /// Returns the short channel ID for this hop, if one is known.
+       ///
+       /// This SCID could be an alias or a globally unique SCID, and thus is only expected to
+       /// uniquely identify this channel in conjunction with the [`CandidateRouteHop::source`].
+       ///
+       /// Returns `Some` as long as the candidate is a [`CandidateRouteHop::PublicHop`], a
+       /// [`CandidateRouteHop::PrivateHop`] from a BOLT 11 route hint, or a
+       /// [`CandidateRouteHop::FirstHop`] with a known [`ChannelDetails::get_outbound_payment_scid`]
+       /// (which is always true for channels which are funded and ready for use).
+       ///
+       /// In other words, this should always return `Some` as long as the candidate hop is not a
+       /// [`CandidateRouteHop::Blinded`] or a [`CandidateRouteHop::OneHopBlinded`].
+       ///
+       /// Note that this is deliberately not public as it is somewhat of a footgun because it doesn't
+       /// define a global namespace.
+       #[inline]
+       fn short_channel_id(&self) -> Option<u64> {
                match self {
                        CandidateRouteHop::FirstHop { details, .. } => details.get_outbound_payment_scid(),
                        CandidateRouteHop::PublicHop { short_channel_id, .. } => Some(*short_channel_id),
@@ -1085,6 +1112,22 @@ impl<'a> CandidateRouteHop<'a> {
                }
        }
 
+       /// Returns the globally unique short channel ID for this hop, if one is known.
+       ///
+       /// This only returns `Some` if the channel is public (either our own, or one we've learned
+       /// from the public network graph), and thus the short channel ID we have for this channel is
+       /// globally unique and identifies this channel in a global namespace.
+       #[inline]
+       pub fn globally_unique_short_channel_id(&self) -> Option<u64> {
+               match self {
+                       CandidateRouteHop::FirstHop { details, .. } => if details.is_public { details.short_channel_id } else { None },
+                       CandidateRouteHop::PublicHop { short_channel_id, .. } => Some(*short_channel_id),
+                       CandidateRouteHop::PrivateHop { .. } => None,
+                       CandidateRouteHop::Blinded { .. } => None,
+                       CandidateRouteHop::OneHopBlinded { .. } => None,
+               }
+       }
+
        // NOTE: This may alloc memory so avoid calling it in a hot code path.
        fn features(&self) -> ChannelFeatures {
                match self {
@@ -1096,7 +1139,12 @@ impl<'a> CandidateRouteHop<'a> {
                }
        }
 
-       /// Returns cltv_expiry_delta for this hop.
+       /// Returns the required difference in HTLC CLTV expiry between the [`Self::source`] and the
+       /// next-hop for an HTLC taking this hop.
+       ///
+       /// This is the time that the node(s) in this hop have to claim the HTLC on-chain if the
+       /// next-hop goes on chain with a payment preimage.
+       #[inline]
        pub fn cltv_expiry_delta(&self) -> u32 {
                match self {
                        CandidateRouteHop::FirstHop { .. } => 0,
@@ -1107,7 +1155,8 @@ impl<'a> CandidateRouteHop<'a> {
                }
        }
 
-       /// Returns the htlc_minimum_msat for this hop.
+       /// Returns the minimum amount that can be sent over this hop, in millisatoshis.
+       #[inline]
        pub fn htlc_minimum_msat(&self) -> u64 {
                match self {
                        CandidateRouteHop::FirstHop { details, .. } => details.next_outbound_htlc_minimum_msat,
@@ -1118,7 +1167,8 @@ impl<'a> CandidateRouteHop<'a> {
                }
        }
 
-       /// Returns the fees for this hop.
+       /// Returns the fees that must be paid to route an HTLC over this channel.
+       #[inline]
        pub fn fees(&self) -> RoutingFees {
                match self {
                        CandidateRouteHop::FirstHop { .. } => RoutingFees {
@@ -1137,6 +1187,10 @@ impl<'a> CandidateRouteHop<'a> {
                }
        }
 
+       /// Fetch the effective capacity of this hop.
+       ///
+       /// Note that this may be somewhat expensive, so calls to this should be limited and results
+       /// cached!
        fn effective_capacity(&self) -> EffectiveCapacity {
                match self {
                        CandidateRouteHop::FirstHop { details, .. } => EffectiveCapacity::ExactLiquidity {
@@ -1153,10 +1207,11 @@ impl<'a> CandidateRouteHop<'a> {
                }
        }
 
-       ///  Returns the id of this hop.
-       ///  For `Blinded` and `OneHopBlinded` we return `CandidateHopId::Blinded` with `hint_idx` because we don't know the channel id.
-       ///  For any other option we return `CandidateHopId::Clear` because we know the channel id and the direction.
-       pub fn id(&self) -> CandidateHopId {
+       /// Returns an ID describing the given hop.
+       ///
+       /// See the docs on [`CandidateHopId`] for when this is, or is not, unique.
+       #[inline]
+       fn id(&self) -> CandidateHopId {
                match self {
                        CandidateRouteHop::Blinded { hint_idx, .. } => CandidateHopId::Blinded(*hint_idx),
                        CandidateRouteHop::OneHopBlinded { hint_idx, .. } => CandidateHopId::Blinded(*hint_idx),
@@ -1173,12 +1228,13 @@ impl<'a> CandidateRouteHop<'a> {
        }
        /// Returns the source node id of current hop.
        ///
-       /// Source node id refers to the hop forwarding the payment.
+       /// Source node id refers to the node forwarding the HTLC through this hop.
        ///
-       /// For `FirstHop` we return payer's node id.
+       /// For [`Self::FirstHop`] we return the payer's node id.
+       #[inline]
        pub fn source(&self) -> NodeId {
                match self {
-                       CandidateRouteHop::FirstHop { node_id, .. } => *node_id,
+                       CandidateRouteHop::FirstHop { payer_node_id, .. } => **payer_node_id,
                        CandidateRouteHop::PublicHop { info, .. } => *info.source(),
                        CandidateRouteHop::PrivateHop { hint, .. } => hint.src_node_id.into(),
                        CandidateRouteHop::Blinded { hint, .. } => hint.1.introduction_node_id.into(),
@@ -1187,26 +1243,36 @@ impl<'a> CandidateRouteHop<'a> {
        }
        /// Returns the target node id of this hop, if known.
        ///
-       /// Target node id refers to the hop receiving the payment.
+       /// Target node id refers to the node receiving the HTLC after this hop.
+       ///
+       /// For [`Self::Blinded`] we return `None` because the ultimate destination after the blinded
+       /// path is unknown.
        ///
-       /// For `Blinded` and `OneHopBlinded` we return `None` because next hop is blinded.
-       pub fn target(&self) -> Option<NodeId> {
+       /// For [`Self::OneHopBlinded`] we return `None` because the target is the same as the source,
+       /// and such a return value would be somewhat nonsensical.
+       #[inline]
+       pub fn target(&self) -> Option<NodeId> {
                match self {
                        CandidateRouteHop::FirstHop { details, .. } => Some(details.counterparty.node_id.into()),
                        CandidateRouteHop::PublicHop { info, .. } => Some(*info.target()),
-                       CandidateRouteHop::PrivateHop { target_node_id, .. } => Some(*target_node_id),
+                       CandidateRouteHop::PrivateHop { target_node_id, .. } => Some(**target_node_id),
                        CandidateRouteHop::Blinded { .. } => None,
                        CandidateRouteHop::OneHopBlinded { .. } => None,
                }
        }
 }
 
-/// A wrapper around the various hop id representations.
+/// A unique(ish) identifier for a specific [`CandidateRouteHop`].
+///
+/// For blinded paths, this ID is unique only within a given [`find_route`] call.
+///
+/// For other hops, because SCIDs between private channels and public channels can conflict, this
+/// isn't guaranteed to be unique at all.
 ///
-/// `CandidateHopId::Clear` is used to identify a hop with a known short channel id and direction.
-/// `CandidateHopId::Blinded` is used to identify a blinded hop `hint_idx`.
+/// For our uses, this is generally fine, but it is not public as it is otherwise a rather
+/// difficult-to-use API.
 #[derive(Clone, Copy, Eq, Hash, Ord, PartialOrd, PartialEq)]
-pub enum CandidateHopId {
+enum CandidateHopId {
        /// Contains (scid, src_node_id < target_node_id)
        Clear((u64, bool)),
        /// Index of the blinded route hint in [`Payee::Blinded::route_hints`].
@@ -1246,15 +1312,15 @@ fn iter_equal<I1: Iterator, I2: Iterator>(mut iter_a: I1, mut iter_b: I2)
 /// Fee values should be updated only in the context of the whole path, see update_value_and_recompute_fees.
 /// These fee values are useful to choose hops as we traverse the graph "payee-to-payer".
 #[derive(Clone)]
+#[repr(C)] // Force fields to appear in the order we define them.
 struct PathBuildingHop<'a> {
        candidate: CandidateRouteHop<'a>,
-       fee_msat: u64,
-
-       /// All the fees paid *after* this channel on the way to the destination
-       next_hops_fee_msat: u64,
-       /// Fee paid for the use of the current channel (see candidate.fees()).
-       /// The value will be actually deducted from the counterparty balance on the previous link.
-       hop_use_fee_msat: u64,
+       /// If we've already processed a node as the best node, we shouldn't process it again. Normally
+       /// we'd just ignore it if we did as all channels would have a higher new fee, but because we
+       /// may decrease the amounts in use as we walk the graph, the actual calculated fee may
+       /// decrease as well. Thus, we have to explicitly track which nodes have been processed and
+       /// avoid processing them again.
+       was_processed: bool,
        /// Used to compare channels when choosing the channel to use for routing.
        /// Includes paying for the use of a hop and the following hops, as well as
        /// an estimated cost of reaching this hop.
@@ -1266,12 +1332,20 @@ struct PathBuildingHop<'a> {
        /// All penalties incurred from this channel on the way to the destination, as calculated using
        /// channel scoring.
        path_penalty_msat: u64,
-       /// If we've already processed a node as the best node, we shouldn't process it again. Normally
-       /// we'd just ignore it if we did as all channels would have a higher new fee, but because we
-       /// may decrease the amounts in use as we walk the graph, the actual calculated fee may
-       /// decrease as well. Thus, we have to explicitly track which nodes have been processed and
-       /// avoid processing them again.
-       was_processed: bool,
+
+       // The last 16 bytes are on the next cache line by default in glibc's malloc. Thus, we should
+       // only place fields which are not hot there. Luckily, the next three fields are only read if
+       // we end up on the selected path, and only in the final path layout phase, so we don't care
+       // too much if reading them is slow.
+
+       fee_msat: u64,
+
+       /// All the fees paid *after* this channel on the way to the destination
+       next_hops_fee_msat: u64,
+       /// Fee paid for the use of the current channel (see candidate.fees()).
+       /// The value will be actually deducted from the counterparty balance on the previous link.
+       hop_use_fee_msat: u64,
+
        #[cfg(all(not(ldk_bench), any(test, fuzzing)))]
        // In tests, we apply further sanity checks on cases where we skip nodes we already processed
        // to ensure it is specifically in cases where the fee has gone down because of a decrease in
@@ -1280,6 +1354,18 @@ struct PathBuildingHop<'a> {
        value_contribution_msat: u64,
 }
 
+// Checks that the entries in the `find_route` `dist` map fit in (exactly) two standard x86-64
+// cache lines. Sadly, they're not guaranteed to actually lie on a cache line (and in fact,
+// generally won't, because at least glibc's malloc will align to a nice, big, round
+// boundary...plus 16), but at least it will reduce the amount of data we'll need to load.
+//
+// Note that these assertions only pass on somewhat recent rustc, and thus are gated on the
+// ldk_bench flag.
+#[cfg(ldk_bench)]
+const _NODE_MAP_SIZE_TWO_CACHE_LINES: usize = 128 - core::mem::size_of::<(NodeId, PathBuildingHop)>();
+#[cfg(ldk_bench)]
+const _NODE_MAP_SIZE_EXACTLY_CACHE_LINES: usize = core::mem::size_of::<(NodeId, PathBuildingHop)>() - 128;
+
 impl<'a> core::fmt::Debug for PathBuildingHop<'a> {
        fn fmt(&self, f: &mut core::fmt::Formatter) -> Result<(), core::fmt::Error> {
                let mut debug_struct = f.debug_struct("PathBuildingHop");
@@ -1734,6 +1820,20 @@ where L::Target: Logger {
                }
        }
 
+       let mut private_hop_key_cache = HashMap::with_capacity(
+               payment_params.payee.unblinded_route_hints().iter().map(|path| path.0.len()).sum()
+       );
+
+       // Because we store references to private hop node_ids in `dist`, below, we need them to exist
+       // (as `NodeId`, not `PublicKey`) for the lifetime of `dist`. Thus, we calculate all the keys
+       // we'll need here and simply fetch them when routing.
+       private_hop_key_cache.insert(maybe_dummy_payee_pk, NodeId::from_pubkey(&maybe_dummy_payee_pk));
+       for route in payment_params.payee.unblinded_route_hints().iter() {
+               for hop in route.0.iter() {
+                       private_hop_key_cache.insert(hop.src_node_id, NodeId::from_pubkey(&hop.src_node_id));
+               }
+       }
+
        // The main heap containing all candidate next-hops sorted by their score (max(fee,
        // htlc_minimum)). Ideally this would be a heap which allowed cheap score reduction instead of
        // adding duplicate entries when we find a better path to a given node.
@@ -2021,15 +2121,6 @@ where L::Target: Logger {
                                                                                score_params);
                                                                let path_penalty_msat = $next_hops_path_penalty_msat
                                                                        .saturating_add(channel_penalty_msat);
-                                                               let new_graph_node = RouteGraphNode {
-                                                                       node_id: src_node_id,
-                                                                       lowest_fee_to_node: total_fee_msat,
-                                                                       total_cltv_delta: hop_total_cltv_delta,
-                                                                       value_contribution_msat,
-                                                                       path_htlc_minimum_msat,
-                                                                       path_penalty_msat,
-                                                                       path_length_to_node,
-                                                               };
 
                                                                // Update the way of reaching $candidate.source()
                                                                // with the given short_channel_id (from $candidate.target()),
@@ -2054,6 +2145,13 @@ where L::Target: Logger {
                                                                        .saturating_add(path_penalty_msat);
 
                                                                if !old_entry.was_processed && new_cost < old_cost {
+                                                                       let new_graph_node = RouteGraphNode {
+                                                                               node_id: src_node_id,
+                                                                               score: cmp::max(total_fee_msat, path_htlc_minimum_msat).saturating_add(path_penalty_msat),
+                                                                               total_cltv_delta: hop_total_cltv_delta,
+                                                                               value_contribution_msat,
+                                                                               path_length_to_node,
+                                                                       };
                                                                        targets.push(new_graph_node);
                                                                        old_entry.next_hops_fee_msat = $next_hops_fee_msat;
                                                                        old_entry.hop_use_fee_msat = hop_use_fee_msat;
@@ -2127,28 +2225,38 @@ where L::Target: Logger {
        // meaning how much will be paid in fees after this node (to the best of our knowledge).
        // This data can later be helpful to optimize routing (pay lower fees).
        macro_rules! add_entries_to_cheapest_to_target_node {
-               ( $node: expr, $node_id: expr, $fee_to_target_msat: expr, $next_hops_value_contribution: expr,
-                 $next_hops_path_htlc_minimum_msat: expr, $next_hops_path_penalty_msat: expr,
+               ( $node: expr, $node_id: expr, $next_hops_value_contribution: expr,
                  $next_hops_cltv_delta: expr, $next_hops_path_length: expr ) => {
+                       let fee_to_target_msat;
+                       let next_hops_path_htlc_minimum_msat;
+                       let next_hops_path_penalty_msat;
                        let skip_node = if let Some(elem) = dist.get_mut(&$node_id) {
                                let was_processed = elem.was_processed;
                                elem.was_processed = true;
+                               fee_to_target_msat = elem.total_fee_msat;
+                               next_hops_path_htlc_minimum_msat = elem.path_htlc_minimum_msat;
+                               next_hops_path_penalty_msat = elem.path_penalty_msat;
                                was_processed
                        } else {
                                // Entries are added to dist in add_entry!() when there is a channel from a node.
                                // Because there are no channels from payee, it will not have a dist entry at this point.
                                // If we're processing any other node, it will always be the result of a channel from it.
                                debug_assert_eq!($node_id, maybe_dummy_payee_node_id);
+                               fee_to_target_msat = 0;
+                               next_hops_path_htlc_minimum_msat = 0;
+                               next_hops_path_penalty_msat = 0;
                                false
                        };
 
                        if !skip_node {
                                if let Some(first_channels) = first_hop_targets.get(&$node_id) {
                                        for details in first_channels {
-                                               let candidate = CandidateRouteHop::FirstHop { details, node_id: our_node_id };
-                                               add_entry!(&candidate, $fee_to_target_msat,
+                                               let candidate = CandidateRouteHop::FirstHop {
+                                                       details, payer_node_id: &our_node_id,
+                                               };
+                                               add_entry!(&candidate, fee_to_target_msat,
                                                        $next_hops_value_contribution,
-                                                       $next_hops_path_htlc_minimum_msat, $next_hops_path_penalty_msat,
+                                                       next_hops_path_htlc_minimum_msat, next_hops_path_penalty_msat,
                                                        $next_hops_cltv_delta, $next_hops_path_length);
                                        }
                                }
@@ -2171,10 +2279,10 @@ where L::Target: Logger {
                                                                                        short_channel_id: *chan_id,
                                                                                };
                                                                                add_entry!(&candidate,
-                                                                                       $fee_to_target_msat,
+                                                                                       fee_to_target_msat,
                                                                                        $next_hops_value_contribution,
-                                                                                       $next_hops_path_htlc_minimum_msat,
-                                                                                       $next_hops_path_penalty_msat,
+                                                                                       next_hops_path_htlc_minimum_msat,
+                                                                                       next_hops_path_penalty_msat,
                                                                                        $next_hops_cltv_delta, $next_hops_path_length);
                                                                        }
                                                                }
@@ -2200,7 +2308,9 @@ where L::Target: Logger {
                // place where it could be added.
                payee_node_id_opt.map(|payee| first_hop_targets.get(&payee).map(|first_channels| {
                        for details in first_channels {
-                               let candidate = CandidateRouteHop::FirstHop { details, node_id: our_node_id };
+                               let candidate = CandidateRouteHop::FirstHop {
+                                       details, payer_node_id: &our_node_id,
+                               };
                                let added = add_entry!(&candidate, 0, path_value_msat,
                                                                        0, 0u64, 0, 0).is_some();
                                log_trace!(logger, "{} direct route to payee via {}",
@@ -2217,7 +2327,7 @@ where L::Target: Logger {
                        // If not, targets.pop() will not even let us enter the loop in step 2.
                        None => {},
                        Some(node) => {
-                               add_entries_to_cheapest_to_target_node!(node, payee, 0, path_value_msat, 0, 0u64, 0, 0);
+                               add_entries_to_cheapest_to_target_node!(node, payee, path_value_msat, 0, 0);
                        },
                });
 
@@ -2247,7 +2357,9 @@ where L::Target: Logger {
                                sort_first_hop_channels(first_channels, &used_liquidities, recommended_value_msat,
                                        our_node_pubkey);
                                for details in first_channels {
-                                       let first_hop_candidate = CandidateRouteHop::FirstHop { details, node_id: our_node_id};
+                                       let first_hop_candidate = CandidateRouteHop::FirstHop {
+                                               details, payer_node_id: &our_node_id,
+                                       };
                                        let blinded_path_fee = match compute_fees(path_contribution_msat, candidate.fees()) {
                                                Some(fee) => fee,
                                                None => continue
@@ -2286,8 +2398,7 @@ where L::Target: Logger {
                                let mut aggregate_path_contribution_msat = path_value_msat;
 
                                for (idx, (hop, prev_hop_id)) in hop_iter.zip(prev_hop_iter).enumerate() {
-                                       let source = NodeId::from_pubkey(&hop.src_node_id);
-                                       let target = NodeId::from_pubkey(&prev_hop_id);
+                                       let target = private_hop_key_cache.get(&prev_hop_id).unwrap();
 
                                        if let Some(first_channels) = first_hop_targets.get(&target) {
                                                if first_channels.iter().any(|d| d.outbound_scid_alias == Some(hop.short_channel_id)) {
@@ -2344,7 +2455,9 @@ where L::Target: Logger {
                                                sort_first_hop_channels(first_channels, &used_liquidities,
                                                        recommended_value_msat, our_node_pubkey);
                                                for details in first_channels {
-                                                       let first_hop_candidate = CandidateRouteHop::FirstHop { details, node_id: our_node_id};
+                                                       let first_hop_candidate = CandidateRouteHop::FirstHop {
+                                                               details, payer_node_id: &our_node_id,
+                                                       };
                                                        add_entry!(&first_hop_candidate,
                                                                aggregate_next_hops_fee_msat, aggregate_path_contribution_msat,
                                                                aggregate_next_hops_path_htlc_minimum_msat, aggregate_next_hops_path_penalty_msat,
@@ -2389,7 +2502,9 @@ where L::Target: Logger {
                                                        sort_first_hop_channels(first_channels, &used_liquidities,
                                                                recommended_value_msat, our_node_pubkey);
                                                        for details in first_channels {
-                                                               let first_hop_candidate = CandidateRouteHop::FirstHop { details, node_id: our_node_id};
+                                                               let first_hop_candidate = CandidateRouteHop::FirstHop {
+                                                                       details, payer_node_id: &our_node_id,
+                                                               };
                                                                add_entry!(&first_hop_candidate,
                                                                        aggregate_next_hops_fee_msat,
                                                                        aggregate_path_contribution_msat,
@@ -2419,7 +2534,7 @@ where L::Target: Logger {
                // Both these cases (and other cases except reaching recommended_value_msat) mean that
                // paths_collection will be stopped because found_new_path==false.
                // This is not necessarily a routing failure.
-               'path_construction: while let Some(RouteGraphNode { node_id, lowest_fee_to_node, total_cltv_delta, mut value_contribution_msat, path_htlc_minimum_msat, path_penalty_msat, path_length_to_node, .. }) = targets.pop() {
+               'path_construction: while let Some(RouteGraphNode { node_id, total_cltv_delta, mut value_contribution_msat, path_length_to_node, .. }) = targets.pop() {
 
                        // Since we're going payee-to-payer, hitting our node as a target means we should stop
                        // traversing the graph and arrange the path out of what we found.
@@ -2432,8 +2547,10 @@ where L::Target: Logger {
                                        let target = ordered_hops.last().unwrap().0.candidate.target().unwrap_or(maybe_dummy_payee_node_id);
                                        if let Some(first_channels) = first_hop_targets.get(&target) {
                                                for details in first_channels {
-                                                       if let Some(scid) = ordered_hops.last().unwrap().0.candidate.short_channel_id() {
-                                                               if details.get_outbound_payment_scid().unwrap() == scid {
+                                                       if let CandidateRouteHop::FirstHop { details: last_hop_details, .. }
+                                                               = ordered_hops.last().unwrap().0.candidate
+                                                       {
+                                                               if details.get_outbound_payment_scid() == last_hop_details.get_outbound_payment_scid() {
                                                                        ordered_hops.last_mut().unwrap().1 = details.counterparty.features.to_context();
                                                                        features_set = true;
                                                                        break;
@@ -2552,8 +2669,8 @@ where L::Target: Logger {
                        match network_nodes.get(&node_id) {
                                None => {},
                                Some(node) => {
-                                       add_entries_to_cheapest_to_target_node!(node, node_id, lowest_fee_to_node,
-                                               value_contribution_msat, path_htlc_minimum_msat, path_penalty_msat,
+                                       add_entries_to_cheapest_to_target_node!(node, node_id,
+                                               value_contribution_msat,
                                                total_cltv_delta, path_length_to_node);
                                },
                        }
@@ -2682,8 +2799,8 @@ where L::Target: Logger {
        });
        for idx in 0..(selected_route.len() - 1) {
                if idx + 1 >= selected_route.len() { break; }
-               if iter_equal(selected_route[idx].hops.iter().map(|h| (h.0.candidate.id(), h.0.candidate.target())),
-                                                                       selected_route[idx + 1].hops.iter().map(|h| (h.0.candidate.id(), h.0.candidate.target()))) {
+               if iter_equal(selected_route[idx    ].hops.iter().map(|h| (h.0.candidate.id(), h.0.candidate.target())),
+                             selected_route[idx + 1].hops.iter().map(|h| (h.0.candidate.id(), h.0.candidate.target()))) {
                        let new_value = selected_route[idx].get_value_msat() + selected_route[idx + 1].get_value_msat();
                        selected_route[idx].update_value_and_recompute_fees(new_value);
                        selected_route.remove(idx + 1);
index 5d637a859903bfa34f849328ac366517b541394b..337fd8eec7a895ce5198edb3a9774267048de03d 100644 (file)
@@ -1344,13 +1344,11 @@ impl<G: Deref<Target = NetworkGraph<L>>, L: Deref, T: Time> ScoreLookUp for Prob
        fn channel_penalty_msat(
                &self, candidate: &CandidateRouteHop, usage: ChannelUsage, score_params: &ProbabilisticScoringFeeParameters
        ) -> u64 {
-               let scid = match candidate.short_channel_id() {
-                       Some(scid) => scid,
-                       None => return 0,
-               };
-               let target = match candidate.target() {
-                       Some(target) => target,
-                       None => return 0,
+               let (scid, target) = match candidate {
+                       CandidateRouteHop::PublicHop { info, short_channel_id } => {
+                               (short_channel_id, info.target())
+                       },
+                       _ => return 0,
                };
                let source = candidate.source();
                if let Some(penalty) = score_params.manual_node_penalties.get(&target) {
@@ -2244,10 +2242,6 @@ mod tests {
                PublicKey::from_secret_key(&secp_ctx, &recipient_privkey())
        }
 
-       fn sender_node_id() -> NodeId {
-               NodeId::from_pubkey(&sender_pubkey())
-       }
-
        fn recipient_node_id() -> NodeId {
                NodeId::from_pubkey(&recipient_pubkey())
        }
@@ -2902,7 +2896,7 @@ mod tests {
                        effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024, htlc_maximum_msat: 1_024 },
                };
                let channel = network_graph.read_only().channel(42).unwrap().to_owned();
-               let (info, target) = channel.as_directed_from(&source).unwrap();
+               let (info, _) = channel.as_directed_from(&source).unwrap();
                let candidate = CandidateRouteHop::PublicHop {
                        info,
                        short_channel_id: 42,
@@ -3002,7 +2996,7 @@ mod tests {
                        effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024, htlc_maximum_msat: 1_000 },
                };
                let channel = network_graph.read_only().channel(42).unwrap().to_owned();
-               let (info, target) = channel.as_directed_from(&source).unwrap();
+               let (info, _) = channel.as_directed_from(&source).unwrap();
                let candidate = CandidateRouteHop::PublicHop {
                        info,
                        short_channel_id: 42,
@@ -3388,6 +3382,7 @@ mod tests {
                assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), u64::max_value());
        }
 
+       #[test]
        fn remembers_historical_failures() {
                let logger = TestLogger::new();
                let network_graph = network_graph(&logger);
@@ -3402,6 +3397,7 @@ mod tests {
                };
                let mut scorer = ProbabilisticScorer::new(decay_params, &network_graph, &logger);
                let source = source_node_id();
+               let target = target_node_id();
 
                let usage = ChannelUsage {
                        amount_msat: 100,
@@ -3413,24 +3409,37 @@ mod tests {
                        inflight_htlc_msat: 0,
                        effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024, htlc_maximum_msat: 1_024 },
                };
-               let network_graph = network_graph.read_only();
-               let channel = network_graph.channel(42).unwrap();
-               let (info, target) = channel.as_directed_from(&source).unwrap();
-               let candidate = CandidateRouteHop::PublicHop {
-                       info,
-                       short_channel_id: 42,
-               };
 
-               // With no historical data the normal liquidity penalty calculation is used.
-               assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 168);
+               {
+                       let network_graph = network_graph.read_only();
+                       let channel = network_graph.channel(42).unwrap();
+                       let (info, _) = channel.as_directed_from(&source).unwrap();
+                       let candidate = CandidateRouteHop::PublicHop {
+                               info,
+                               short_channel_id: 42,
+                       };
+
+                       // With no historical data the normal liquidity penalty calculation is used.
+                       assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 168);
+               }
                assert_eq!(scorer.historical_estimated_channel_liquidity_probabilities(42, &target),
                None);
                assert_eq!(scorer.historical_estimated_payment_success_probability(42, &target, 42, &params),
                None);
 
                scorer.payment_path_failed(&payment_path_for_amount(1), 42);
-               assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 2048);
-               assert_eq!(scorer.channel_penalty_msat(&candidate, usage_1, &params), 249);
+               {
+                       let network_graph = network_graph.read_only();
+                       let channel = network_graph.channel(42).unwrap();
+                       let (info, _) = channel.as_directed_from(&source).unwrap();
+                       let candidate = CandidateRouteHop::PublicHop {
+                               info,
+                               short_channel_id: 42,
+                       };
+
+                       assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 2048);
+                       assert_eq!(scorer.channel_penalty_msat(&candidate, usage_1, &params), 249);
+               }
                // The "it failed" increment is 32, where the probability should lie several buckets into
                // the first octile.
                assert_eq!(scorer.historical_estimated_channel_liquidity_probabilities(42, &target),
@@ -3444,7 +3453,17 @@ mod tests {
                // Even after we tell the scorer we definitely have enough available liquidity, it will
                // still remember that there was some failure in the past, and assign a non-0 penalty.
                scorer.payment_path_failed(&payment_path_for_amount(1000), 43);
-               assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 105);
+               {
+                       let network_graph = network_graph.read_only();
+                       let channel = network_graph.channel(42).unwrap();
+                       let (info, _) = channel.as_directed_from(&source).unwrap();
+                       let candidate = CandidateRouteHop::PublicHop {
+                               info,
+                               short_channel_id: 42,
+                       };
+
+                       assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 105);
+               }
                // The first points should be decayed just slightly and the last bucket has a new point.
                assert_eq!(scorer.historical_estimated_channel_liquidity_probabilities(42, &target),
                        Some(([31, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0],
@@ -3464,7 +3483,17 @@ mod tests {
                // Advance the time forward 16 half-lives (which the docs claim will ensure all data is
                // gone), and check that we're back to where we started.
                SinceEpoch::advance(Duration::from_secs(10 * 16));
-               assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 168);
+               {
+                       let network_graph = network_graph.read_only();
+                       let channel = network_graph.channel(42).unwrap();
+                       let (info, _) = channel.as_directed_from(&source).unwrap();
+                       let candidate = CandidateRouteHop::PublicHop {
+                               info,
+                               short_channel_id: 42,
+                       };
+
+                       assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 168);
+               }
                // Once fully decayed we still have data, but its all-0s. In the future we may remove the
                // data entirely instead.
                assert_eq!(scorer.historical_estimated_channel_liquidity_probabilities(42, &target),
@@ -3477,16 +3506,26 @@ mod tests {
                        effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024, htlc_maximum_msat: 1_024 },
                };
                scorer.payment_path_failed(&payment_path_for_amount(1), 42);
-               assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 2050);
-               usage.inflight_htlc_msat = 0;
-               assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 866);
+               {
+                       let network_graph = network_graph.read_only();
+                       let channel = network_graph.channel(42).unwrap();
+                       let (info, _) = channel.as_directed_from(&source).unwrap();
+                       let candidate = CandidateRouteHop::PublicHop {
+                               info,
+                               short_channel_id: 42,
+                       };
 
-               let usage = ChannelUsage {
-                       amount_msat: 1,
-                       inflight_htlc_msat: 0,
-                       effective_capacity: EffectiveCapacity::AdvertisedMaxHTLC { amount_msat: 0 },
-               };
-               assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 2048);
+                       assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 2050);
+                       usage.inflight_htlc_msat = 0;
+                       assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 866);
+
+                       let usage = ChannelUsage {
+                               amount_msat: 1,
+                               inflight_htlc_msat: 0,
+                               effective_capacity: EffectiveCapacity::AdvertisedMaxHTLC { amount_msat: 0 },
+                       };
+                       assert_eq!(scorer.channel_penalty_msat(&candidate, usage, &params), 2048);
+               }
 
                // Advance to decay all liquidity offsets to zero.
                SinceEpoch::advance(Duration::from_secs(60 * 60 * 10));
index 8ca5333f63daabf21a5baed562ec8d0ae2053327..92ea8ffed55a91c218c4481e01a83f924536585d 100644 (file)
@@ -254,11 +254,11 @@ mod tests {
        }
 
        struct WrapperLog {
-               logger: Arc<Logger>
+               logger: Arc<dyn Logger>
        }
 
        impl WrapperLog {
-               fn new(logger: Arc<Logger>) -> WrapperLog {
+               fn new(logger: Arc<dyn Logger>) -> WrapperLog {
                        WrapperLog {
                                logger,
                        }
@@ -278,7 +278,7 @@ mod tests {
        fn test_logging_macros() {
                let mut logger = TestLogger::new();
                logger.enable(Level::Gossip);
-               let logger : Arc<Logger> = Arc::new(logger);
+               let logger : Arc<dyn Logger> = Arc::new(logger);
                let wrapper = WrapperLog::new(Arc::clone(&logger));
                wrapper.call_macros();
        }
index 0d5cfc81906fddacfe24ae97d202fe3f20f7383e..484d603404297d6aaf4e6c59f8f46af2a86fb4dc 100644 (file)
@@ -371,14 +371,14 @@ impl Writeable for BigSize {
        #[inline]
        fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
                match self.0 {
-                       0...0xFC => {
+                       0..=0xFC => {
                                (self.0 as u8).write(writer)
                        },
-                       0xFD...0xFFFF => {
+                       0xFD..=0xFFFF => {
                                0xFDu8.write(writer)?;
                                (self.0 as u16).write(writer)
                        },
-                       0x10000...0xFFFFFFFF => {
+                       0x10000..=0xFFFFFFFF => {
                                0xFEu8.write(writer)?;
                                (self.0 as u32).write(writer)
                        },
index 9bbbec0d78e6a182e6e8e2553e60642db9645f52..557be65d5fa8ddb9f35db92bdeab9c79630e4e03 100644 (file)
@@ -30,9 +30,9 @@ use crate::ln::msgs::LightningError;
 use crate::ln::script::ShutdownScript;
 use crate::offers::invoice::UnsignedBolt12Invoice;
 use crate::offers::invoice_request::UnsignedInvoiceRequest;
-use crate::routing::gossip::{EffectiveCapacity, NetworkGraph, NodeId};
+use crate::routing::gossip::{EffectiveCapacity, NetworkGraph, NodeId, RoutingFees};
 use crate::routing::utxo::{UtxoLookup, UtxoLookupError, UtxoResult};
-use crate::routing::router::{find_route, InFlightHtlcs, Path, Route, RouteParameters, Router, ScorerAccountingForInFlightHtlcs};
+use crate::routing::router::{find_route, InFlightHtlcs, Path, Route, RouteParameters, RouteHintHop, Router, ScorerAccountingForInFlightHtlcs};
 use crate::routing::scoring::{ChannelUsage, ScoreUpdate, ScoreLookUp};
 use crate::sync::RwLock;
 use crate::util::config::UserConfig;
@@ -71,6 +71,7 @@ use crate::sign::{InMemorySigner, Recipient, EntropySource, NodeSigner, SignerPr
 
 #[cfg(feature = "std")]
 use std::time::{SystemTime, UNIX_EPOCH};
+use bitcoin::psbt::PartiallySignedTransaction;
 use bitcoin::Sequence;
 
 pub fn pubkey(byte: u8) -> PublicKey {
@@ -129,6 +130,7 @@ impl<'a> Router for TestRouter<'a> {
                                let scorer = ScorerAccountingForInFlightHtlcs::new(scorer, &inflight_htlcs);
                                for path in &route.paths {
                                        let mut aggregate_msat = 0u64;
+                                       let mut prev_hop_node = payer;
                                        for (idx, hop) in path.hops.iter().rev().enumerate() {
                                                aggregate_msat += hop.fee_msat;
                                                let usage = ChannelUsage {
@@ -137,38 +139,44 @@ impl<'a> Router for TestRouter<'a> {
                                                        effective_capacity: EffectiveCapacity::Unknown,
                                                };
 
-                                               // Since the path is reversed, the last element in our iteration is the first
-                                               // hop.
                                                if idx == path.hops.len() - 1 {
-                                                       let first_hops = match first_hops {
-                                                               Some(hops) => hops,
-                                                               None => continue,
-                                                       };
-                                                       if first_hops.len() == 0 {
-                                                               continue;
+                                                       if let Some(first_hops) = first_hops {
+                                                               if let Some(idx) = first_hops.iter().position(|h| h.get_outbound_payment_scid() == Some(hop.short_channel_id)) {
+                                                                       let node_id = NodeId::from_pubkey(payer);
+                                                                       let candidate = CandidateRouteHop::FirstHop {
+                                                                               details: first_hops[idx],
+                                                                               payer_node_id: &node_id,
+                                                                       };
+                                                                       scorer.channel_penalty_msat(&candidate, usage, &());
+                                                                       continue;
+                                                               }
                                                        }
-                                                       let idx = if first_hops.len() > 1 { route.paths.iter().position(|p| p == path).unwrap_or(0) } else { 0 };
-                                                       let candidate = CandidateRouteHop::FirstHop {
-                                                               details: first_hops[idx],
-                                                               node_id: NodeId::from_pubkey(payer)
+                                               }
+                                               let network_graph = self.network_graph.read_only();
+                                               if let Some(channel) = network_graph.channel(hop.short_channel_id) {
+                                                       let (directed, _) = channel.as_directed_to(&NodeId::from_pubkey(&hop.pubkey)).unwrap();
+                                                       let candidate = CandidateRouteHop::PublicHop {
+                                                               info: directed,
+                                                               short_channel_id: hop.short_channel_id,
                                                        };
                                                        scorer.channel_penalty_msat(&candidate, usage, &());
                                                } else {
-                                                       let network_graph = self.network_graph.read_only();
-                                                       let channel = match network_graph.channel(hop.short_channel_id) {
-                                                               Some(channel) => channel,
-                                                               None => continue,
-                                                       };
-                                                       let channel = match channel.as_directed_to(&NodeId::from_pubkey(&hop.pubkey)) {
-                                                               Some(channel) => channel,
-                                                               None => panic!("Channel directed to {} was not found", hop.pubkey),
-                                                       };
-                                                       let candidate = CandidateRouteHop::PublicHop {
-                                                               info: channel.0,
+                                                       let target_node_id = NodeId::from_pubkey(&hop.pubkey);
+                                                       let route_hint = RouteHintHop {
+                                                               src_node_id: *prev_hop_node,
                                                                short_channel_id: hop.short_channel_id,
+                                                               fees: RoutingFees { base_msat: 0, proportional_millionths: 0 },
+                                                               cltv_expiry_delta: 0,
+                                                               htlc_minimum_msat: None,
+                                                               htlc_maximum_msat: None,
+                                                       };
+                                                       let candidate = CandidateRouteHop::PrivateHop {
+                                                               hint: &route_hint,
+                                                               target_node_id: &target_node_id,
                                                        };
                                                        scorer.channel_penalty_msat(&candidate, usage, &());
                                                }
+                                               prev_hop_node = &hop.pubkey;
                                        }
                                }
                        }
@@ -227,7 +235,7 @@ pub struct TestChainMonitor<'a> {
        pub added_monitors: Mutex<Vec<(OutPoint, channelmonitor::ChannelMonitor<TestChannelSigner>)>>,
        pub monitor_updates: Mutex<HashMap<ChannelId, Vec<channelmonitor::ChannelMonitorUpdate>>>,
        pub latest_monitor_update_id: Mutex<HashMap<ChannelId, (OutPoint, u64, MonitorUpdateId)>>,
-       pub chain_monitor: chainmonitor::ChainMonitor<TestChannelSigner, &'a TestChainSource, &'a chaininterface::BroadcasterInterface, &'a TestFeeEstimator, &'a TestLogger, &'a chainmonitor::Persist<TestChannelSigner>>,
+       pub chain_monitor: chainmonitor::ChainMonitor<TestChannelSigner, &'a TestChainSource, &'a dyn chaininterface::BroadcasterInterface, &'a TestFeeEstimator, &'a TestLogger, &'a dyn chainmonitor::Persist<TestChannelSigner>>,
        pub keys_manager: &'a TestKeysInterface,
        /// If this is set to Some(), the next update_channel call (not watch_channel) must be a
        /// ChannelForceClosed event for the given channel_id with should_broadcast set to the given
@@ -238,7 +246,7 @@ pub struct TestChainMonitor<'a> {
        pub expect_monitor_round_trip_fail: Mutex<Option<ChannelId>>,
 }
 impl<'a> TestChainMonitor<'a> {
-       pub fn new(chain_source: Option<&'a TestChainSource>, broadcaster: &'a chaininterface::BroadcasterInterface, logger: &'a TestLogger, fee_estimator: &'a TestFeeEstimator, persister: &'a chainmonitor::Persist<TestChannelSigner>, keys_manager: &'a TestKeysInterface) -> Self {
+       pub fn new(chain_source: Option<&'a TestChainSource>, broadcaster: &'a dyn chaininterface::BroadcasterInterface, logger: &'a TestLogger, fee_estimator: &'a TestFeeEstimator, persister: &'a dyn chainmonitor::Persist<TestChannelSigner>, keys_manager: &'a TestKeysInterface) -> Self {
                Self {
                        added_monitors: Mutex::new(Vec::new()),
                        monitor_updates: Mutex::new(HashMap::new()),
@@ -1324,7 +1332,7 @@ impl ScoreLookUp for TestScorer {
        fn channel_penalty_msat(
                &self, candidate: &CandidateRouteHop, usage: ChannelUsage, _score_params: &Self::ScoreParams
        ) -> u64 {
-               let short_channel_id = match candidate.short_channel_id() {
+               let short_channel_id = match candidate.globally_unique_short_channel_id() {
                        Some(scid) => scid,
                        None => return 0,
                };
@@ -1410,7 +1418,8 @@ impl WalletSource for TestWalletSource {
                Ok(ScriptBuf::new_p2pkh(&public_key.pubkey_hash()))
        }
 
-       fn sign_tx(&self, mut tx: Transaction) -> Result<Transaction, ()> {
+       fn sign_psbt(&self, psbt: PartiallySignedTransaction) -> Result<Transaction, ()> {
+               let mut tx = psbt.extract_tx();
                let utxos = self.utxos.borrow();
                for i in 0..tx.input.len() {
                        if let Some(utxo) = utxos.iter().find(|utxo| utxo.outpoint == tx.input[i].previous_output) {
index d73360749dfaa0d9fb9aed985ef3121e6beaa543..3a4acc675e6be5bbe915e70311d3ee7e7d2c328e 100644 (file)
@@ -1,7 +1,7 @@
 [package]
 name = "msrv-check"
 version = "0.1.0"
-edition = "2018"
+edition = "2021"
 
 [dependencies]
 lightning = { path = "../lightning" }
@@ -11,3 +11,4 @@ lightning-net-tokio = { path = "../lightning-net-tokio" }
 lightning-persister = { path = "../lightning-persister" }
 lightning-background-processor = { path = "../lightning-background-processor", features = ["futures"] }
 lightning-rapid-gossip-sync = { path = "../lightning-rapid-gossip-sync" }
+lightning-custom-message = { path = "../lightning-custom-message" }
index 16d2fc110e2a42252dbc83afb623f6a76bff570d..c9d404c922f86012b87e1b961ca451e60c49e0bb 100644 (file)
@@ -1,7 +1,7 @@
 [package]
 name = "no-std-check"
 version = "0.1.0"
-edition = "2018"
+edition = "2021"
 
 [features]
 default = ["lightning/no-std", "lightning-invoice/no-std", "lightning-rapid-gossip-sync/no-std"]